diff --git "a/1153.jsonl" "b/1153.jsonl" new file mode 100644--- /dev/null +++ "b/1153.jsonl" @@ -0,0 +1,461 @@ +{"seq_id": "32233043", "text": "\nimport csv\nimport json\n\ncsv_file = open('BirdsBeingDicks.csv', 'r') # Open read only\njson_file = open('BBD.json', 'w') # Write only JSON\ncol_names = (\"created_utc\", \"score\", \"domain\", \"_id\", \"title\", \"author\", \"ups\", \"downs\", \"num_comments\", \"permalink\", \"selftext\", \"link_flair_text\", \"over_18\", \"thumbnail\", \"subreddit_id\", \"edited\", \"link_flair_css_class\", \"author_flair_css_class\", \"is_self\", \"name\", \"url\", \"distinguished\")\nread_csv = csv.DictReader(csv_file, col_names);\n\ni=0\nfor row in read_csv:\n\trow_necessary = {}\n\trow_necessary['time'] = row['created_utc']\n\trow_necessary['score'] = row['score']\n\trow_necessary['domain'] = row['domain']\n\trow_necessary['_id'] = row['_id']\n\trow_necessary['title'] = row['title']\n\trow_necessary['author'] = row['author']\n\trow_necessary['ups'] = row['ups']\n\trow_necessary['downs'] = row['downs']\n\trow_necessary['num_comments'] = row['num_comments']\n\trow_necessary['permalink'] = row['permalink']\n\trow_necessary['name'] = row['name']\n\trow_necessary['url'] = row['url']\n\tif i != 0:\n\t\tjson.dump(row_necessary, json_file, sort_keys=False)\n\t\tjson_file.write(\"\\n\")\n\ti+=1\n\njson_file.close()\njsonnew = open('BBDNEW.json','w');\nwith open('BBD.json') as f:\n\tcontent = f.readlines()\n\tfor line in content:\n\t\tif \"\\(\\)\\[\\]\" in line:\n\t\t\tline = line.replace(\"\\(\",\"\\\\(\")\n\t\t\tline = line.replace(\"\\)\",\"\\\\)\")\n\t\t\tline = line.replace(\"\\[\",\"\\\\[\")\n\t\t\tline = line.replace(\"\\]\",\"\\\\]\")\n\t\tjsonnew.write(line)\nf.close()\n", "sub_path": "importCSVtoJSON.py", "file_name": "importCSVtoJSON.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "csv.DictReader", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "350521340", "text": "import uuid\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Person:\r\n def __init__(self):\r\n self.__name = \"Vitalii\"\r\n self.__last_name = 'Kolobok'\r\n self.__uuid = uuid.uuid4().hex\r\n\r\n def __repr__(self):\r\n return f'{self.__dict__}'\r\n\r\n def creating_new_object(self, **kwargs):\r\n self_copy = deepcopy(self)\r\n for value in kwargs:\r\n setattr(self_copy, value, kwargs[value])\r\n return self_copy\r\n\r\n\r\nperson = Person()\r\n\r\n''' Сopying primary data and adding data '''\r\nperson_objects = person.creating_new_object(firm_car='Renault', model_car='Koleos',\r\n color_car=\"Grey\", government_number='AX2578EM')\r\nprint(person_objects)\r\nprint(person)\r\n", "sub_path": "DZ_7 rev_1.py", "file_name": "DZ_7 rev_1.py", "file_ext": "py", "file_size_in_byte": 739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 9, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "625752915", "text": "from dataclasses import dataclass\nfrom base.common.models.request import BaseRequestModelKeys, SimpleRequestModel\n\n\n@dataclass\nclass GetTPOQueueDataRequestParams(BaseRequestModelKeys):\n COMPANY_ID: str = \"CompanyID\"\n CONTACT_ID: str = \"ContactID\"\n\n\nclass GetTPOQueueDataRequest(SimpleRequestModel):\n def __init__(self, 
company_id, contact_id, session_id, nonce, pretty_print):\n self.company_id = company_id\n self.contact_id = contact_id\n super().__init__(session_id=session_id, nonce=nonce, pretty_print=pretty_print)\n\n def to_params(self):\n args = super().to_params()\n args[GetTPOQueueDataRequestParams.COMPANY_ID] = self.company_id\n args[GetTPOQueueDataRequestParams.CONTACT_ID] = self.contact_id\n return args\n", "sub_path": "APIs/tpo/requests/get_tpo_queue_data.py", "file_name": "get_tpo_queue_data.py", "file_ext": "py", "file_size_in_byte": 773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "base.common.models.request.BaseRequestModelKeys", "line_number": 6, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 5, "usage_type": "name"}, {"api_name": "base.common.models.request.SimpleRequestModel", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "182082887", "text": "#!/usr/bin/python\n# coding=utf-8\n\n\nfrom django.template import loader,Context\nfrom django.http import HttpResponse\nfrom gallery.models import Blog\nfrom django.shortcuts import render\nfrom django.db.models import Q\n\ndef Posts(request):\n context = {}\n context['posts'] = Blog.objects.all()\n archive_link = time_line(request)\n context.update(archive_link)\n return render(request,'posts.html',context)\n \n\n\ndef blog_detail(request,id=None):\n context = {}\n blog = Blog.objects.get(pk=id)\n context['blog'] = blog\n# context['id'] = id\n return render(request,'blog_detail.html',context)\n\n\ndef search(request):\n context = {}\n key = request.GET.get('search','')\n context['key'] = key\n context['blogs'] = Blog.objects.filter(title__icontains=key).order_by('-timestamp')\n return render(request,'search.html',context)\n\ndef time_line(request):\n context = {}\n# all_time_line = []\n blogs_list = []\n# blogs = Blog.objects.all()\n blogs= Blog.objects.values('id','title', 'timestamp').order_by('timestamp')\n# for blog in blogs:\n# if blog.timestamp not in all_time_line:\n# all_time_line.append(blog.timestamp)\n# context['all_time_line'] = all_time_line\n dates = set([str(i['timestamp'].year)+str(i['timestamp'].month) for i in blogs])\n for i in dates:\n dic = {}\n b_info = []\n count = 0\n dic['year'] = i[:4]\n dic['month'] = i[4:]\n for obj in blogs:\n if str(obj['timestamp'].year) + str(obj['timestamp'].month) == i:\n dic_ = {}\n dic_['blog'] = obj\n b_info.append(dic_)\n count += 1\n dic['count'] = count\n dic['b_info'] = b_info\n blogs_list.append(dic)\n \n context['dates'] = blogs_list\n# context['blogs'] = blogs\n return context\n # return render(request,'posts.html',context)\n \ndef archive(request):\n context = {}\n post = []\n year = request.GET.get('year','')\n month = request.GET.get('month','')\n blogs = Blog.objects.filter(Q(timestamp__month = month), Q(timestamp__year = year))\n context['posts'] = blogs\n archive_link = time_line(request)\n context.update(archive_link)\n \n return render(request,'posts.html',context)\n", "sub_path": "Quentin/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gallery.models.Blog.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 13, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects.get", "line_number": 22, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects.values", "line_number": 40, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 40, "usage_type": "name"}, {"api_name": "gallery.models.Blog.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "279629574", "text": "import codecs\nimport os\ntry: # for pip >= 10\n from pip._internal.req import parse_requirements\n from pip._internal.download import PipSession\n from pip._internal.index import PackageFinder\nexcept ImportError: # for pip <= 9.0.3\n from pip.download import PipSession\n from pip.index import PackageFinder\n from pip.req import parse_requirements\nfrom setuptools import find_packages, setup\n\nroot_dir = os.path.abspath(os.path.dirname(__file__))\nrequirements_path = os.path.join(root_dir, 'requirements', 'base.txt')\n\nsession = PipSession()\nfinder = PackageFinder([], [], session=session)\nrequirements = parse_requirements(requirements_path, finder, session=session)\ninstall_requires = [r.name for r in requirements]\n\nversion = '2.2.3' # Don't forget to update docs/CHANGELOG.rst if you increment the version\n\nwith codecs.open('README.rst', 'r', 'utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"sbo-sphinx\",\n version=version,\n author=\"Jeremy Bowman\",\n author_email=\"jbowman@safaribooksonline.com\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Software Development :: Documentation',\n ],\n description=\"Sphinx configuration and libraries for Safari Books Online documentation\",\n long_description=long_description,\n url='http://github.com/safarijv/sbo-sphinx',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.abspath", 
"line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pip.download.PipSession", "line_number": 16, "usage_type": "call"}, {"api_name": "pip.index.PackageFinder", "line_number": 17, "usage_type": "call"}, {"api_name": "pip.req.parse_requirements", "line_number": 18, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 23, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 26, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "67977184", "text": "# -*- coding: utf-8 -*-\r\n\r\n# Define your item pipelines here\r\n#\r\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\r\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\r\n\r\n# import datetime\r\n# import redis\r\n# import json\r\n# from scrapy import signals, Request\r\n# from scrapy.exporters import JsonItemExporter\r\n# from scrapy.pipelines.images import ImagesPipeline\r\n# from scrapy.exceptions import DropItem\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nfrom tutorial.items import HuxiuItem\r\nfrom tutorial.model import db_connect, create_news_table, session_scope\r\nfrom tutorial.model.huxiu_model import HuXiuModel\r\nimport logging\r\nlog = logging.getLogger(__name__)\r\n\r\nclass HuxiuPipeline(object):\r\n def __init__(self):\r\n engine = db_connect()\r\n create_news_table(engine)\r\n self.Session = sessionmaker(bind=engine)\r\n\r\n def process_item(self, item, spider):\r\n if isinstance(item, HuxiuItem):\r\n link = item[\"link\"].encode(\"utf-8\")\r\n session = self.Session()\r\n obj = session.query(HuXiuModel).filter(HuXiuModel.link==link).first()\r\n if obj:\r\n if \"published\" in item:\r\n obj.published = item[\"published\"].encode(\"utf-8\")\r\n if \"desc\" in item:\r\n obj.desc = item[\"desc\"].encode(\"utf-8\")\r\n session.add(obj)\r\n session.commit()\r\n else:\r\n published = item[\"published\"].encode(\"utf-8\") if \"published\" in item else \"\"\r\n desc = item[\"desc\"].encode(\"utf-8\") if \"desc\" in item else \"\"\r\n obj = HuXiuModel(\r\n link=link,\r\n title=item[\"title\"].encode(\"utf-8\"),\r\n desc=desc,\r\n published=published,\r\n )\r\n session.add(obj)\r\n session.commit()\r\n # with session_scope(self.Session) as db:\r\n # db.add(obj)\r\n log.info(item)\r\n return item\r\n\r\n def open_spider(self, spider):\r\n \"\"\"This method is called when the spider is opened.\"\"\"\r\n pass\r\n\r\n def close_spider(self, spider):\r\n pass\r\n", "sub_path": "python/scrapy-spider/tutorial/pipeline/huxiu_pipe.py", "file_name": "huxiu_pipe.py", "file_ext": "py", "file_size_in_byte": 2204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "tutorial.model.db_connect", "line_number": 25, "usage_type": "call"}, {"api_name": "tutorial.model.create_news_table", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 27, "usage_type": "call"}, {"api_name": "tutorial.items.HuxiuItem", "line_number": 30, "usage_type": "argument"}, {"api_name": "tutorial.model.huxiu_model.HuXiuModel", "line_number": 33, "usage_type": 
"argument"}, {"api_name": "tutorial.model.huxiu_model.HuXiuModel.link", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tutorial.model.huxiu_model.HuXiuModel", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "225534113", "text": "import os\r\nimport argparse\r\nimport numpy as np\r\nimport pandas as pd\r\nimport nibabel as nib\r\nfrom ukbb_cardiac.common.cardiac_utils import get_frames\r\nfrom ukbb_cardiac.common.image_utils import np_categorical_dice\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--output_csv', metavar='csv_name', default='DM_table.csv', required=True)\r\n args = parser.parse_args()\r\n\r\n print('Creating accuracy spreadsheet file ...')\r\n\r\n if os.path.exists(args.output_csv):\r\n os.remove(args.output_csv)\r\n \r\n # Record ED ES frames to csv\r\n init = {'Data': [],\r\n 'EDLV': [],\r\n 'EDLVM': [],\r\n 'EDRV': [],\r\n 'ESLV': [],\r\n 'ESLVM': [],\r\n 'ESRV': [],\r\n }\r\n\r\n df = pd.DataFrame(init)\r\n\r\n root = './demo_image'\r\n folder_list = sorted(os.listdir(root))\r\n\r\n for folder in folder_list:\r\n folder_dir = os.path.join(root, folder)\r\n if os.path.exists('{0}/{1}_seg_sa_ED.nii.gz'.format(folder_dir, folder) and ('{0}/{1}_seg_sa_ES.nii.gz'.format(folder_dir, folder))\r\n and ('{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder))):\r\n \r\n seg_sa_ED = '{0}/{1}_seg_sa_ED.nii.gz'.format(folder_dir, folder)\r\n seg_sa_ES = '{0}/{1}_seg_sa_ES.nii.gz'.format(folder_dir, folder)\r\n seg_sa_ground_truth = '{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder)\r\n ##seg_sa_ED ='{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder) # To see Dice metric between same segmentations is 1\r\n \r\n seg_gt = nib.load(seg_sa_ground_truth).get_fdata()\r\n \r\n fr = get_frames(seg_gt, 'sa')\r\n seg_ED_gt = seg_gt[:, :, :, fr['ED']] \r\n seg_ES_gt = seg_gt[:, :, :, fr['ES']] \r\n \r\n dice_arr = np.zeros(6)\r\n ind = 0\r\n \r\n frames = ['ED','ES']\r\n segm = ['LV','LV Myocardium','RV']\r\n for fr in frames:\r\n print('\\nFor image {0}, Comparison between: {1} \\n'.format(folder, fr))\r\n\r\n seg_model = nib.load(seg_sa_ED).get_fdata() if fr == 'ED' else nib.load(seg_sa_ES).get_fdata()\r\n ##if fr == 'ED' : seg_model = seg_model[:,:,:,0] # To see Dice metric between same segmentations is 1\r\n \r\n \r\n for i in range(1,4): # Loop for all segmentations\r\n print('Calculate Dice metric for ',segm[i - 1])\r\n \r\n total_seg_ED = np.sum(seg_ED_gt == i, axis=(0, 1, 2))\r\n print('Seg num (', segm[i-1],') in ground truth ED: ',np.max(total_seg_ED))\r\n total_seg_ES = np.sum(seg_ES_gt == i, axis=(0, 1, 2))\r\n print('Seg num (', segm[i-1],') in ground truth ES: ',np.max(total_seg_ES))\r\n\r\n total_seg = np.sum(seg_model == i, axis=(0, 1, 2))\r\n print('Seg num in model: ', np.max(total_seg))\r\n \r\n #denom = seg_ED_gt.shape[0]* seg_ED_gt.shape[1]* seg_ED_gt.shape[2]\r\n \r\n if fr == 'ED':\r\n dice_metric = np_categorical_dice(seg_model, seg_ED_gt, i) if (total_seg + total_seg_ED > 0) else 0 \r\n else:\r\n dice_metric = np_categorical_dice(seg_model, seg_ES_gt, i) if (total_seg + total_seg_ES > 0) else 0\r\n \r\n print(\"Dice metric for {0}: %\".format(fr) , dice_metric * 100,'\\n')\r\n \r\n dice_arr[ind] = dice_metric * 100\r\n ind += 1\r\n print('{0} finished'.format(folder)) \r\n\r\n frames_dict = {'Data': [folder],\r\n 'EDLV': [dice_arr[0]],\r\n 'EDLVM': [dice_arr[1]],\r\n 'EDRV': [dice_arr[2]],\r\n 'ESLV': [dice_arr[3]],\r\n 'ESLVM': [dice_arr[4]],\r\n 'ESRV': [dice_arr[5]],\r\n }\r\n df1 = 
pd.DataFrame(frames_dict)\r\n df = df.append(df1, ignore_index = True)\r\n \r\n else:\r\n print('Error! Can not find one of the expected files: {0}/{1}_seg_sa_ED.nii.gz or {0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder))\r\n\r\n df.to_csv(args.output_csv, index = False)", "sub_path": "dice_calculator.py", "file_name": "dice_calculator.py", "file_ext": "py", "file_size_in_byte": 4422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "nibabel.load", "line_number": 44, "usage_type": "call"}, {"api_name": "ukbb_cardiac.common.cardiac_utils.get_frames", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 71, "usage_type": "call"}, {"api_name": "ukbb_cardiac.common.image_utils.np_categorical_dice", "line_number": 76, "usage_type": "call"}, {"api_name": "ukbb_cardiac.common.image_utils.np_categorical_dice", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "322192585", "text": "from scrapy.spiders import SitemapSpider, CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy import Request\nimport json\n\n\nfrom myproject.items import RestaurantJson\n\nclass TabelogAllSpider(SitemapSpider):\n name = \"tabelog_all\"\n allowed_domains = [\"tabelog.com\"]\n # sitemap_urls = (\n # 'https://tabelog.com/sitemap.xml',\n # )\n #\n # sitemap_rules = [\n # (r'/sitemap_pc_area1_rstlst_\\d+.xml.gz$', 'parse_search_result'),\n # ]\n\n sitemap_rules = (\n (\"https://tabelog.com/sitemap_pc_area1_rstlst_1.xml.gz\", 'parse_search_result'),\n (\"https://tabelog.com/sitemap_pc_area1_rstlst_2.xml.gz\", 'parse_search_result'),\n )\n\n def parse_search_result(self, response):\n \"\"\"\n 地域ページからレストランページへのリンク\n https://tabelog.com/hokkaido/A0104/rstLst/cond04-00-03/RC999909/\n \"\"\"\n\n restaurant_pages = response.css('a.list-rst__rst-name-target cpy-rst-name').extract()\n if restaurant_pages is not None:\n for page in restaurant_pages:\n yield Request(response.urljoin(page), callback=self.parse_restaurant)\n\n def parse_restaurant(self, response):\n\n \"\"\"\n レストランページからJSON-LD\n :param response:\n :return:\n \"\"\"\n\n restaurant_json = 
json.loads(response.css('script[type=\"application/ld+json\"]').xpath('string()').extract_first())\n\n item = RestaurantJson(\n keyword = restaurant_json['name'],\n target = restaurant_json['@id']\n )\n\n yield item\n\n", "sub_path": "python/scrapy/myproject/myproject/spiders/tabelog_all.py", "file_name": "tabelog_all.py", "file_ext": "py", "file_size_in_byte": 1602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scrapy.spiders.SitemapSpider", "line_number": 9, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "myproject.items.RestaurantJson", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "186203042", "text": "\"\"\"\n.. module:: clustermixin\n :platform: Darwin, Linux, Unix, Windows\n :synopsis: Contains a ClusterMixIn object to use for working with the nodes of a cluster\n\n.. moduleauthor:: Myron Walker \n\"\"\"\n\n__author__ = \"Myron Walker\"\n__copyright__ = \"Copyright 2020, Myron W Walker\"\n__credits__ = []\n__version__ = \"1.0.0\"\n__maintainer__ = \"Myron Walker\"\n__email__ = \"myron.walker@gmail.com\"\n__status__ = \"Development\" # Prototype, Development or Production\n__license__ = \"MIT\"\n\nfrom typing import Dict, List, Tuple\n\nfrom akit.mixins.integration import IntegrationMixIn\n\nclass ClusterMixIn(IntegrationMixIn):\n \"\"\"\n This is a mock playback device.\n \"\"\"\n\n pathbase = \"/clusters\"\n\n def __init__(self, *args, role=None, **kwargs):\n \"\"\"\n The default contructor for an :class:`IntegrationMixIn`.\n \"\"\"\n super(ClusterMixIn, self).__init__(*args, role=role, **kwargs)\n\n if self.pathbase is None:\n raise ValueError(\"The 'pathbase' class member variable must be set to a unique name for each integration class type.\")\n\n self.context.insert(self.pathbase , self)\n return\n\n @classmethod\n def attach_to_environment(cls, constaints: Dict={}):\n \"\"\"\n This API is called so that the IntegrationMixIn can process configuration information. The :class:`IntegrationMixIn`\n will verify that it has a valid environment and configuration to run in.\n\n :raises :class:`akit.exceptions.AKitMissingConfigError`, :class:`akit.exceptions.AKitInvalidConfigError`:\n \"\"\"\n return\n\n @classmethod\n def collect_resources(cls):\n \"\"\"\n This API is called so the `IntegrationMixIn` can connect with a resource management\n system and gain access to the resources required for the automation run.\n\n :raises :class:`akit.exceptions.AKitResourceError`:\n \"\"\"\n\n return\n\n @classmethod\n def diagnostic(cls, diag_level: int, diag_folder: str):\n \"\"\"\n The API is called by the :class:`akit.sequencer.Sequencer` object when the automation sequencer is\n building out a diagnostic package at a diagnostic point in the automation sequence. 
Example diagnostic\n points are:\n\n * pre-run\n * post-run\n\n Each diagnostic package has its own storage location so derived :class:`akit.scope.ScopeMixIn` objects\n can simply write to their specified output folder.\n\n :param diag_level: The maximum diagnostic level to run dianostics for.\n :param diag_folder: The output folder path where the diagnostic information should be written.\n \"\"\"\n\n return\n\n @classmethod\n def establish_connectivity(cls) -> Tuple[List[str], Dict]:\n \"\"\"\n This API is called so that this subclass of the `IntegrationMixIn` can establish connectivity with any\n compute or storage resources.\n\n :raises :class:`akit.exceptins.AKitInitialConnectivityError`:\n \"\"\"\n\n return\n\n @classmethod\n def establish_presence(cls) -> Tuple[List[str], Dict]:\n \"\"\"\n This API is called so the `IntegrationMixIn` can establish presence with any compute or storage\n resources.\n\n :returns: A tuple with a list of error messages for failed connections and dict of connectivity\n reports for devices devices based on the coordinator.\n \"\"\"\n return", "sub_path": "packages/akit/integration/cluster/clustermixin.py", "file_name": "clustermixin.py", "file_ext": "py", "file_size_in_byte": 3516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "akit.mixins.integration.IntegrationMixIn", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "68642674", "text": "#! 
python3\r\n# downloadXkcd.py - Dowloads every single XKCD comic\r\n\r\nimport requests, os, bs4\r\n\r\nurl = 'http://xkcd.com' # starting url\r\nos.chdir('C:\\\\Users\\\\Mack W\\\\Documents\\\\Python\\\\' +\r\n 'automateTheBoringStuffWithPython\\\\' +\r\n 'Chapter 11 Web Scraping\\\\XKCD Comics')\r\nwhile not url.endswith('#'):\r\n # Download the page\r\n print('Page: ', end='')\r\n print(url)\r\n res = requests.get(url)\r\n res.raise_for_status()\r\n soup = bs4.BeautifulSoup(res.text)\r\n \r\n # Find the URL of the comic image\r\n comicElem = soup.select('#comic img')\r\n if comicElem == []:\r\n print('Could not find comic image.')\r\n else:\r\n comicUrl = 'http:' + comicElem[0].get('src')\r\n \r\n # Download the image\r\n print('Image: ', end='')\r\n print(comicUrl)\r\n res = requests.get(comicUrl)\r\n res.raise_for_status()\r\n \r\n # Save the image\r\n imageName = os.path.basename(comicUrl)\r\n imageFile = open(imageName, 'wb')\r\n for chunk in res.iter_content(100000):\r\n imageFile.write(chunk)\r\n imageFile.close()\r\n \r\n # Get the Prev button's url\r\n prevLink = soup.select('a[rel=\"prev\"]')[0]\r\n url = 'http://xkcd.com' + prevLink.get('href')\r\n\r\nprint('Done.')\r\n", "sub_path": "Command Line Programs/downloadXkcd.py", "file_name": "downloadXkcd.py", "file_ext": "py", "file_size_in_byte": 1229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.chdir", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "377362762", "text": "import kivy\nimport sqlite3\nimport json\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.spinner import Spinner\nfrom kivy.uix.widget import Widget\n#from.kivy.properties import ObjectProperty\n\n\nreadedNote = None\npriorToSearch=10\nnowEditing=10\nseenNotes = list()\nlastId = 0\nindex = 0\n\n#Builder.load_file('style.kv')\n\nclass MyGrid(Widget):\n pass\n def __init__(self, **kwargs):\n super(MyGrid, self).__init__(**kwargs)\n\n self.addButton.bind(on_press=self.add)\n self.buttonDid.bind(on_press=self.didIt)\n self.buttonLater.bind(on_press=self.doItLater)\n\n self.aboutBt.bind(on_press=self.about)\n self.showAllBt.bind(on_press=self.showAll)\n\n configFile = open(\"config.json\", \"r\")\n config = json.load(configFile)\n\n self.addButton.text = config.get(\"addButton\", \"+\")\n self.buttonDid.text = config.get(\"doneButton\", \"DONE\")\n self.buttonLater.text = config.get(\"laterButton\", \"LATER\")\n self.showAllBt.text = config.get(\"showAllButton\", \"SHOW ALL\")\n self.aboutBt.text = config.get(\"aboutButton\", \"ABOUT\")\n self.settingsBt.text = config.get(\"settingsButton\", \"SETTINGS\")\n\n self.findNew(self)\n\n\n def about(self, obj):\n global index\n self.noteLabel.text = \"By: Marek Maskarinec \\n version: 0.1\"\n index = 0\n\n def showAll(self, obj):\n global index\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards\")\n cards = c.fetchall()\n print(cards)\n if cards is not []:\n for i 
in range(len(cards)):\n self.noteLabel.text += cards[i][0] + \"\\n\" + str(cards[i][1]) + \"\\n\" + \"--------\" + \"\\n\"\n else:\n self.noteLabel.text = \"No notes\"\n conn.close()\n index = 0\n\n def add(self, obj):\n global index\n noteText = self.addTextBox.text\n priority = int(self.priorityBox.text )\n print(priority)\n print(noteText)\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n #c.execute(\"\"\"CREATE TABLE cards\n # (note text, priority number)\"\"\")\n c.execute(\"INSERT INTO cards VALUES (?, ?)\", (noteText, priority))\n c.execute(\"SELECT * FROM cards ORDER BY priority DESC\")\n conn.commit()\n conn.close()\n index = 0\n #nowEditing = nowEditing - 1\n\n def didIt(self, obj):\n global index\n global priorToSearch\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards\")\n cards = c.fetchall()\n if len(cards) > 0 and index < len(cards):\n c.execute('DELETE FROM cards WHERE note=?', (cards[index][0],))\n conn.commit()\n conn.close()\n if len(cards) > 0:\n self.findNew(obj)\n #index += 1\n\n def doItLater(self, obj):\n global index\n self.findNew(obj)\n index += 1\n\n def findNew(self, obj):\n global index\n print(index)\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards ORDER BY priority DESC\")\n #c.execute(\"SELECT * FROM cards\")\n cards = c.fetchall()\n print(cards)\n conn.close()\n if index >= len(cards):\n index = 0\n\n if len(cards) > 0:\n self.noteLabel.text = cards[index][0]\n else:\n self.noteLabel.text = \"no notes\"\n \"\"\"global priorToSearch\n global seenNotes\n global lastId\n cycleNumber = 0\n while True:\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards WHERE priority=?\", (priorToSearch,))\n recordRow = c.fetchone()\n if recordRow is not None:\n c.execute(\"SELECT rowid, * FROM cards WHERE note=?\", (recordRow[0],))\n lastId = c.fetchone\n else:\n self.noteLabel.text = \"Create note first\"\n\n if recordRow is None:\n self.noteLabel.text = \"\"\n priorToSearch = priorToSearch - 1\n if priorToSearch < 1:\n if cycleNumber == 1:\n break\n else:\n priorToSearch = 10\n cycleNumber = cycleNumber + 1\n #seenNotes = list()\n else:\n print(seenNotes)\n print(lastId)\n if lastId not in seenNotes:\n print(recordRow)\n self.noteLabel.text = recordRow[0]\n #priorityLabel.config(text=recordRow[1])\n seenNotes.append(lastId)\n break\n else:\n priorToSearch = priorToSearch - 1\n\n conn.commit()\n conn.close()\n priorToSearch = priorToSearch -1\"\"\"\n\n\nclass Window(App):\n\n def build(self):\n return MyGrid()\n\nwindow = Window()\n\nwindow.run()\n", "sub_path": "taskDeckAndoid.py", "file_name": "taskDeckAndoid.py", "file_ext": "py", "file_size_in_byte": 5191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "kivy.uix.widget.Widget", "line_number": 24, "usage_type": "name"}, {"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 109, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 165, "usage_type": "name"}]} +{"seq_id": "115505475", "text": "import os\nimport logging\nimport logging.handlers\nimport errno\n\n\ndef mkdir_p(path):\n \"\"\"http://stackoverflow.com/a/600612/190597 
(tzot)\"\"\"\n try:\n os.makedirs(path, exist_ok=True) # Python>3.2\n except TypeError:\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise\n\n\nclass LinkFileHandler(logging.handlers.RotatingFileHandler):\n def __init__(self, filename, mode='a', encoding=None, delay=0, maxBytes=10485760, backupCount=2):\n mkdir_p(os.path.dirname(filename))\n logging.FileHandler.__init__(self, filename, mode, encoding, delay)\n\n\ndef Logger():\n # Sets log file path.\n log_file = os.path.join(os.path.dirname(__file__), 'logs/datil_link.log')\n \n # Return a logger with the specified name.\n mylogger = logging.getLogger(\"MyLogger\")\n handler = LinkFileHandler(log_file, maxBytes=10485760, backupCount=2)\n handler = logging.handlers.RotatingFileHandler(log_file)\n\n # Sets the threshold for this logger to lvl. Logging messages which are less severe than lvl will be ignored.\n mylogger.setLevel(logging.DEBUG)\n \n # Sets format of record in log file\n formatter = logging.Formatter('%(asctime)s - %(module)-10s - %(levelname)-8s %(message)s', '%d-%m-%Y %H:%M:%S')\n handler.setFormatter(formatter)\n \n # Adds the specified handler to logger \"MyLogger\"\n mylogger.addHandler(handler)\n\n return mylogger\n\n\nmylogger = Logger()\n", "sub_path": "logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.makedirs", "line_number": 10, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.handlers", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.FileHandler.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "108828403", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom hamcrest import (assert_that, equal_to, is_, is_not, contains,\n has_entries, starts_with, has_length) # noqa: H310\nimport requests\nfrom stepler import base\nfrom stepler.third_party import waiter\n\nfrom vapor import settings\n\n\nclass LBaaSSteps(base.BaseSteps):\n \"\"\"LBaaS steps.\"\"\"\n\n def _check_presence(self, objs, list_method, expected_presence, timeout=0):\n def _check_presence():\n all_objs = list_method()\n matcher = is_\n if not expected_presence:\n matcher = is_not\n return waiter.expect_that(\n all_objs,\n matcher(\n contains(*[has_entries(id=obj['id']) for obj in objs])))\n\n waiter.wait(_check_presence, timeout_seconds=timeout)\n\n def create_lb(self, name, subnet, **kwargs):\n \"\"\"Create loadbalancer and wait it became to online.\"\"\"\n loadbalancer = self._client.lbaas_loadbalancers.create(\n name=name, vip_subnet_id=subnet['id'], **kwargs)\n\n loadbalancer = self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return loadbalancer\n\n def delete_lbs(self, loadbalancers):\n \"\"\"Delete loadbalancer and wait for deletion to be completed.\"\"\"\n for loadbalancer in loadbalancers:\n self._client.lbaas_loadbalancers.delete(loadbalancer['id'])\n\n self.check_lbs_presence(\n loadbalancers, timeout=settings.LBAAS_DELETE_TIMEOUT)\n\n def check_lb_provisioning_status(self,\n loadbalancer,\n expected_status=\"ACTIVE\",\n timeout=0):\n \"\"\"Check that loadbalancer has expected provisioning status.\"\"\"\n\n def _check_status():\n lb = self._client.lbaas_loadbalancers.get(loadbalancer['id'])\n waiter.expect_that(lb['provisioning_status'],\n is_not(starts_with('PENDING_')))\n return lb\n\n loadbalancer = waiter.wait(_check_status, timeout_seconds=timeout)\n assert_that(loadbalancer['provisioning_status'],\n equal_to(expected_status))\n return loadbalancer\n\n def check_lbs_presence(self,\n loadbalancers,\n expected_presence=True,\n timeout=0):\n \"\"\"Check that loadbalancer is present (or not).\"\"\"\n self._check_presence(\n loadbalancers,\n self._client.lbaas_loadbalancers.list,\n expected_presence,\n timeout=timeout)\n\n def cleanup_lbs(self, names):\n \"\"\"Remove all loadbalancers by names list.\"\"\"\n loadbalancers = []\n for name in names:\n for loadbalancer in self._client.lbaas_loadbalancers.find_all(\n name=name):\n loadbalancers.append(loadbalancer)\n self._client.lbaas_loadbalancers.delete(loadbalancer['id'])\n\n self.check_lbs_presence(\n loadbalancers,\n expected_presence=False,\n timeout=settings.LBAAS_DELETE_TIMEOUT)\n\n def create_listener(self, name, loadbalancer, protocol, protocol_port,\n **kwargs):\n \"\"\"Create LBaaS listener.\"\"\"\n listener = self._client.lbaas_listeners.create(\n name=name,\n loadbalancer_id=loadbalancer['id'],\n protocol=protocol,\n protocol_port=protocol_port,\n **kwargs)\n\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return listener\n\n def delete_listener(self, listener):\n \"\"\"Delete LBaaS listener.\"\"\"\n listener = self._client.lbaas_listeners.get(listener['id'])\n loadbalancers = listener['loadbalancers']\n self._client.lbaas_listeners.delete(listener['id'])\n for lb in loadbalancers:\n self.check_lb_provisioning_status(\n lb, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def cleanup_listeners(self, names):\n \"\"\"Remove all listeners by names list.\"\"\"\n for name in names:\n for listener in 
self._client.lbaas_listeners.find_all(name=name):\n self.delete_listener(listener)\n\n def create_pool(self, name, listener, protocol, lb_algorithm, **kwargs):\n \"\"\"Create LBaaS pool.\"\"\"\n pool = self._client.lbaas_pools.create(\n name=name,\n listener_id=listener['id'],\n protocol=protocol,\n lb_algorithm=lb_algorithm,\n **kwargs)\n\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return pool\n\n def delete_pool(self, pool):\n \"\"\"Create LBaaS pool.\"\"\"\n self._client.lbaas_pools.delete(pool['id'])\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def cleanup_pools(self, names):\n \"\"\"Remove all pools by names list.\"\"\"\n loadbalancers = []\n for name in names:\n for pool in self._client.lbaas_pools.find_all(name=name):\n self._client.lbaas_pools.delete(pool['id'])\n loadbalancers.extend(pool['loadbalancers'])\n\n for loadbalancer in loadbalancers:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def create_member(self, pool, address, protocol_port, subnet, **kwargs):\n \"\"\"Create LBaaS pool member.\"\"\"\n member = pool.members.create(\n address=address,\n protocol_port=protocol_port,\n subnet_id=subnet['id'],\n **kwargs)\n\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return member\n\n def delete_member(self, pool, member):\n \"\"\"Delete LBaaS pool member.\"\"\"\n pool.members.delete(member['id'])\n\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def check_balancing(self, ip, port, expected_count):\n \"\"\"Check that responses contains `expected_counts` variants.\"\"\"\n responses = set()\n for _ in range(expected_count * 3):\n r = requests.get(\"http://{}:{}/\".format(ip, port))\n r.raise_for_status()\n responses.add(r.text)\n assert_that(responses, has_length(expected_count))\n", "sub_path": "plugin_test/vapor/vapor/helpers/lbaas.py", "file_name": "lbaas.py", "file_ext": "py", "file_size_in_byte": 7280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "stepler.base.BaseSteps", "line_number": 22, "usage_type": "attribute"}, {"api_name": "stepler.base", "line_number": 22, "usage_type": "name"}, {"api_name": "hamcrest.is_", "line_number": 28, "usage_type": "name"}, {"api_name": "hamcrest.is_not", "line_number": 30, "usage_type": "name"}, {"api_name": "stepler.third_party.waiter.expect_that", "line_number": 31, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 31, "usage_type": "name"}, {"api_name": "hamcrest.contains", "line_number": 34, "usage_type": "call"}, {"api_name": "hamcrest.has_entries", "line_number": 34, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter.wait", "line_number": 36, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 36, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_DELETE_TIMEOUT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 54, "usage_type": "name"}, 
{"api_name": "stepler.third_party.waiter.expect_that", "line_number": 64, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 64, "usage_type": "name"}, {"api_name": "hamcrest.is_not", "line_number": 65, "usage_type": "call"}, {"api_name": "hamcrest.starts_with", "line_number": 65, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter.wait", "line_number": 68, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 68, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 69, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 70, "usage_type": "call"}, {"api_name": "vapor.settings.LBAAS_DELETE_TIMEOUT", "line_number": 96, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 96, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 109, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 120, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 120, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 139, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 139, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 148, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 148, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 160, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 160, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 172, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 172, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 182, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 188, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 191, "usage_type": "call"}, {"api_name": "hamcrest.has_length", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "441899619", "text": "import numpy as np\nimport cv2\nimport math\n\n\ndef get_primary_background_color(img):\n \"\"\"\n Returns the primarily used color in the images, which is assumed to be the background color.\n\n :param img: this is the image\n :returns the primary hue tone.\n :rtype int\n \"\"\"\n\n hist = cv2.calcHist([img], [0], None, [256], [0, 256])\n # get most occurring color\n background_color = hist.argmax(axis=0)\n\n return background_color\n\n\ndef get_background_spot(img, background_color, spot_size=200):\n \"\"\"\n Returns a position in the image, which is the most similar spot to the background color.\n\n :param img: this is the image\n :param background_color: this is the background color\n :param spot_size: the size of the searched spot.\n The higher the value, the slower the search and up to a certain size more stable\n :returns x, y coordinate of the background spot.\n :rtype tuple\n \"\"\"\n\n spot_template = np.zeros((spot_size, spot_size, 3), np.uint8)\n spot_template[:, :, 0] = background_color\n spot_template[:, :, 1] = background_color\n spot_template[:, :, 2] = background_color\n\n # find big background spot\n method = cv2.TM_SQDIFF_NORMED\n result = cv2.matchTemplate(spot_template, img, method)\n # We 
want the minimum squared difference\n mn, _, location, _ = cv2.minMaxLoc(result)\n\n return location\n\n\ndef generate_binary_background_image(img, background_color, threshold=25):\n \"\"\"\n Returns a binary image, which where the background color with some threshold is separated from the rest.\n\n :param img: this is the image\n :param background_color: this is the background color\n :param threshold: the threshold around the primary background color, which still should belong to the background.\n :returns: binary image.\n :rtype: array\n \"\"\"\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, mask1 = cv2.threshold(gray, background_color + threshold, 255, 0)\n ret, mask2 = cv2.threshold(gray, background_color - threshold, 255, 0)\n combined = cv2.bitwise_not(cv2.bitwise_and(mask1, mask2))\n\n return combined\n\n\ndef separate_background(binary_background_img, background_location):\n \"\"\"\n Returns a binary image, where the background ist black and the image locations are white.\n\n :param binary_background_img: binary version of the image\n :param background_location: a location (x,y) where there is some background\n :returns: binary image. \n :rtype: array \n \"\"\"\n\n im_floodfill = binary_background_img.copy()\n h, w = binary_background_img.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n\n cv2.floodFill(im_floodfill, mask, background_location, 128)\n\n im_floodfill[im_floodfill == 0] = 255\n im_floodfill[im_floodfill == 128] = 0\n\n return im_floodfill\n\n\ndef check_for_features(img, threshold=10):\n \"\"\"\n Returns true or false dependent on the amount of features (corners and edges) which are in the image.\n Used to remove images without content (only background).\n\n :param img: input image\n :param threshold: the necessary amount of features needed to be regarded as image\n :returns: boolean, if image as enough features \n :rtype: bool \n \"\"\"\n\n blur1 = cv2.GaussianBlur(img, (7, 7), 0)\n blur2 = cv2.GaussianBlur(img, (15, 15), 0)\n gradients = blur1 - blur2\n\n pixel_sum = np.sum(gradients[0:img.shape[0]-1, 0:img.shape[1]-1, 0:img.shape[2]-1])\n average = pixel_sum / (img.shape[0] * img.shape[1] * img.shape[2])\n\n return average > threshold\n\n\ndef crop_image_rectangles(img, binary_background_image, min_area=-100, max_dimension_relation=2.5, image_padding=10):\n \"\"\"\n Returns an array of images, which are cut out of the original image.\n The cut is based on the binary background image.\n During this process unrelevant (to small, to monoton, ...) 
images are sorted out.\n\n :param img: input image\n :param binary_background_image: binary image showing where background and where foreground is.\n :param min_area: the size(area) an image must at least have to be considered as an image.\n :param max_dimension_relation: the maximum relation between the width and the height of an image\n (-> strips are not allowed)\n :param image_padding: the padding with which image is cut out of the original photo.\n :returns: an array of all the images in the scan\n :rtype: array \n \"\"\"\n\n # initialize output images\n cropped_images = []\n\n im2, contours, hierarchy = cv2.findContours(binary_background_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n album_image_height = binary_background_image.shape[0]\n album_image_width = binary_background_image.shape[1]\n \n if min_area < 0:\n min_area = album_image_height * album_image_width / (-min_area)\n\n count_ignored_because_corner_distance = 0\n count_ignored_because_min_area = 0\n count_ignored_because_total_album = 0\n count_ignored_because_dimension_relation = 0\n\n for i in range(len(contours)):\n # the virtual corners correspond to the edges if every point should be in the image\n # the real corners are the contour points which are closest to the edge\n\n virtual_corners = [[album_image_width, album_image_height], [0, 0]]\n real_corners = [[album_image_width, album_image_height], [0, 0]]\n\n for j in range(len(contours[i])):\n if virtual_corners[0][0] > contours[i][j][0][0]:\n virtual_corners[0][0] = contours[i][j][0][0]\n if virtual_corners[0][1] > contours[i][j][0][1]:\n virtual_corners[0][1] = contours[i][j][0][1]\n if virtual_corners[1][0] < contours[i][j][0][0]:\n virtual_corners[1][0] = contours[i][j][0][0]\n if virtual_corners[1][1] < contours[i][j][0][1]:\n virtual_corners[1][1] = contours[i][j][0][1]\n\n if real_corners[0][0] + real_corners[0][1] > contours[i][j][0][0] + contours[i][j][0][1]:\n real_corners[0][0] = contours[i][j][0][0]\n real_corners[0][1] = contours[i][j][0][1]\n\n if real_corners[1][0] + real_corners[1][1] < contours[i][j][0][0] + contours[i][j][0][1]:\n real_corners[1][0] = contours[i][j][0][0]\n real_corners[1][1] = contours[i][j][0][1]\n\n # check if virtual corners are near real corners\n max_corner_distance = math.sqrt(album_image_width*album_image_width\n + album_image_height*album_image_height) / 20\n\n corner_distance_topleft = math.sqrt(math.pow(real_corners[1][0] - virtual_corners[1][0], 2)\n + math.pow(real_corners[1][1] - virtual_corners[1][1], 2))\n\n corner_distance_bottomright = math.sqrt(math.pow(real_corners[0][0] - virtual_corners[0][0], 2)\n + math.pow(real_corners[0][1] - virtual_corners[0][1], 2))\n\n if corner_distance_topleft > max_corner_distance or corner_distance_bottomright > max_corner_distance:\n count_ignored_because_corner_distance += 1\n continue\n\n image_width = abs(real_corners[0][0] - real_corners[1][0])\n image_height = abs(real_corners[0][1] - real_corners[1][1])\n image_area = abs(image_width * image_height)\n\n # dont save images that are the whole album image\n if img.shape[0] < image_height * 1.1 and img.shape[1] < image_width * 1.1:\n count_ignored_because_total_album += 1\n continue\n\n # dont save images, that are to small\n if image_area < min_area:\n count_ignored_because_min_area += 1\n continue\n\n # dont save images, that have weird dimensions\n if image_height/image_width > max_dimension_relation or image_width/image_height > max_dimension_relation:\n count_ignored_because_dimension_relation += 1\n continue\n\n # if 
there is enough space add some padding\n if real_corners[0][1] - image_padding > 0:\n real_corners[0][1] -= image_padding\n if real_corners[0][0] - image_padding > 0:\n real_corners[0][0] -= image_padding\n if real_corners[1][1] + image_padding < img.shape[0]:\n real_corners[1][1] += image_padding\n if real_corners[1][0] + image_padding < img.shape[1]:\n real_corners[1][0] += image_padding\n\n crop = img[real_corners[0][1]:real_corners[1][1],real_corners[0][0]:real_corners[1][0]]\n cropped_images.append(crop)\n\n return cropped_images\n\n\ndef validate_cropped_images(cropped_images, feature_threshold):\n \"\"\"\n Validated the cropped image by checking for feature.\n\n :param feature_threshold: the necessary amount of features needed to be regarded as image\n :param cropped_images: array - An array of cropped images\n :return: An array of validated cropped images\n :rtype array\n \"\"\"\n valid_cropped_images = []\n\n for image in cropped_images:\n enough_features = check_for_features(image, feature_threshold)\n if enough_features:\n valid_cropped_images.append(image)\n\n return valid_cropped_images\n", "sub_path": "imextract/backgroundremover.py", "file_name": "backgroundremover.py", "file_ext": "py", "file_size_in_byte": 9186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.calcHist", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.TM_SQDIFF_NORMED", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.matchTemplate", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.minMaxLoc", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 79, "usage_type": "attribute"}, {"api_name": "cv2.floodFill", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 168, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 171, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 171, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 172, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 174, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 174, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "382957105", "text": "# Estimate extrinsic camera parameters\n#\n# camera calibration for distorted images with chess board 
samples\n#\n# based on: https://github.com/opencv/opencv/blob/master/samples/python/calibrate.py\n#\n# Dylan Campbell \n\nimport os\nimport cv2 as cv\nimport argparse\nimport json\nimport pickle\nimport subprocess\nimport numpy as np\nfrom glob import glob\nfrom pdb import set_trace as st\n\nscenes = ['01', '02', '04', '05', '06', '07', '08', '09', '10', '11']\ncams = ['dev1', 'dev2', 'dev3']\n\ndef get_extrinsic_parameters(args):\n calib_path = os.path.join(args.dataset_dir, 'Calibration')\n for scene in scenes:\n scene_path = os.path.join(calib_path, scene)\n \n cam = 'dev2' # Compute cameras w.r.t dev2\n cam_path = os.path.join(scene_path, cam)\n img_mask = os.path.join(cam_path, 'images', '????.png')\n img_names = glob(img_mask)\n color_param_filename = os.path.join(cam_path, 'ColorIns.txt')\n rgb_ins_params = get_rgb_ins_params(color_param_filename)\n\n cam1 = 'dev1' # Compute cameras w.r.t dev2\n cam_path1 = os.path.join(scene_path, cam1)\n img_mask1 = os.path.join(cam_path1, 'images', '????.png')\n img_names1 = glob(img_mask1)\n cam3 = 'dev3' # Compute cameras w.r.t dev2\n cam_path3 = os.path.join(scene_path, cam3)\n img_mask3 = os.path.join(cam_path3, 'images', '????.png')\n img_names3 = glob(img_mask3)\n\n pattern_size = (4, 3) # Number of inner corners per a chessboard row and column\n pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)\n pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)\n pattern_points *= args.square_size\n\n obj_points = []\n img_points = []\n img_points1 = []\n img_points3 = []\n h, w = cv.imread(img_names[0], cv.IMREAD_GRAYSCALE).shape[:2]\n\n def processImage(fn):\n # print('processing %s... ' % fn)\n img = cv.imread(fn, 0)\n if img is None:\n print(\"Failed to load\", fn)\n return None\n\n # img = cv.flip(img, 1) # Flip LR\n # cv.imwrite(fn, img)\n\n assert w == img.shape[1] and h == img.shape[0], (\"size: %d x %d ... \" % (img.shape[1], img.shape[0]))\n found, corners = cv.findChessboardCorners(img, pattern_size)\n if found:\n term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)\n cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)\n if args.debug_dir:\n vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.drawChessboardCorners(vis, pattern_size, corners, found)\n _, name, _ = splitfn(fn)\n outfile = os.path.join(args.debug_dir, name + '_chess.png')\n cv.imwrite(outfile, vis)\n if not found:\n print('chessboard not found')\n return None\n # print(' %s... 
OK' % fn)\n return (corners.reshape(-1, 2), pattern_points)\n\n threads_num = args.num_threads\n if threads_num <= 1:\n chessboards1 = [processImage(fn) for fn in img_names1]\n chessboards3 = [processImage(fn) for fn in img_names3]\n chessboards = [processImage(fn) for fn in img_names]\n else:\n # print(\"Run with %d threads...\" % threads_num)\n from multiprocessing.dummy import Pool as ThreadPool\n pool = ThreadPool(threads_num)\n chessboards1 = pool.map(processImage, img_names1)\n chessboards3 = pool.map(processImage, img_names3)\n chessboards = pool.map(processImage, img_names)\n\n chessboards = [x for x in chessboards if x is not None]\n chessboards1 = [x for x in chessboards1 if x is not None]\n chessboards3 = [x for x in chessboards3 if x is not None]\n for (corners, pattern_points) in chessboards:\n img_points.append(corners)\n obj_points.append(pattern_points)\n for (corners, pattern_points) in chessboards1:\n img_points1.append(corners)\n for (corners, pattern_points) in chessboards3:\n img_points3.append(corners)\n\n # Calibrate cameras:\n camera_matrix_gt = np.float32(np.array([[rgb_ins_params[\"fx\"], 0.0, rgb_ins_params[\"cx\"]], [0.0, rgb_ins_params[\"fy\"], rgb_ins_params[\"cy\"]], [0.0, 0.0, 1.0]])) # fx and fy \n dist_coefs_gt = np.float32(np.array([0.0, 0.0, 0.0, 0.0]))\n flags=cv.CALIB_USE_INTRINSIC_GUESS + cv.CALIB_FIX_PRINCIPAL_POINT + cv.CALIB_FIX_ASPECT_RATIO + cv.CALIB_ZERO_TANGENT_DIST + cv.CALIB_FIX_K1 + cv.CALIB_FIX_K2 + cv.CALIB_FIX_K3 + cv.CALIB_FIX_K4 + cv.CALIB_FIX_K5 + cv.CALIB_FIX_K6\n rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), camera_matrix_gt, dist_coefs_gt, flags=flags)\n rms1, camera_matrix1, dist_coefs1, rvecs1, tvecs1 = cv.calibrateCamera(obj_points, img_points1, (w, h), camera_matrix_gt, dist_coefs_gt, flags=flags)\n rms3, camera_matrix3, dist_coefs3, rvecs3, tvecs3 = cv.calibrateCamera(obj_points, img_points3, (w, h), camera_matrix_gt, dist_coefs_gt, flags=flags)\n\n # if debug: undistort the image with the calibration\n for fn in img_names if args.debug_dir else []:\n _path, name, _ext = splitfn(fn)\n img_found = os.path.join(args.debug_dir, name + '_chess.png')\n outfile = os.path.join(args.debug_dir, name + '_undistorted.png')\n img = cv.imread(img_found)\n if img is None:\n continue\n h, w = img.shape[:2]\n newcameramtx, roi = cv.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))\n dst = cv.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)\n # crop and save the image\n x, y, w, h = roi\n dst = dst[y:y+h, x:x+w]\n print('Undistorted image written to: %s' % outfile)\n cv.imwrite(outfile, dst)\n\n flags=cv.CALIB_FIX_INTRINSIC\n rms, camera_matrix, dist_coefs, camera_matrix1, dist_coefs1, R21, T21, _, _ = cv.stereoCalibrate(obj_points, img_points, img_points1, camera_matrix, dist_coefs, camera_matrix1, dist_coefs1, (w, h), flags=flags)\n rms, camera_matrix, dist_coefs, camera_matrix3, dist_coefs3, R23, T23, _, _ = cv.stereoCalibrate(obj_points, img_points, img_points3, camera_matrix, dist_coefs, camera_matrix3, dist_coefs3, (w, h), flags=flags)\n\n camera_parameters = {\n \"K\": camera_matrix,\n \"dist_coefs\": dist_coefs,\n \"R\": np.eye(3),\n \"T\": np.zeros((3,1))\n }\n with open(os.path.join(cam_path, 'camera_parameters.pkl'), 'wb') as f:\n pickle.dump(camera_parameters, f)\n with open(os.path.join(cam_path, 'camera_parameters.json'), 'w') as outfile:\n camera_parameters_serialized = {key: value.tolist() for key, value in camera_parameters.items()}\n 
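# Note (added): json.dump cannot serialize numpy arrays directly, which is why
# the dict comprehension above converts each value with .tolist(); a sketch of
# the reverse mapping when reloading is:
#   params = {k: np.asarray(v) for k, v in json.load(f).items()}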
json.dump(camera_parameters_serialized, outfile)\n\n camera_parameters = {\n \"K\": camera_matrix1,\n \"dist_coefs\": dist_coefs1,\n \"R\": R21,\n \"T\": T21\n }\n with open(os.path.join(cam_path1, 'camera_parameters.pkl'), 'wb') as f:\n pickle.dump(camera_parameters, f)\n with open(os.path.join(cam_path1, 'camera_parameters.json'), 'w') as outfile:\n camera_parameters_serialized = {key: value.tolist() for key, value in camera_parameters.items()}\n json.dump(camera_parameters_serialized, outfile)\n\n camera_parameters = {\n \"K\": camera_matrix3,\n \"dist_coefs\": dist_coefs3,\n \"R\": R23,\n \"T\": T23\n }\n with open(os.path.join(cam_path3, 'camera_parameters.pkl'), 'wb') as f:\n pickle.dump(camera_parameters, f)\n with open(os.path.join(cam_path3, 'camera_parameters.json'), 'w') as outfile:\n camera_parameters_serialized = {key: value.tolist() for key, value in camera_parameters.items()}\n json.dump(camera_parameters_serialized, outfile)\n\ndef splitfn(fn):\n path, fn = os.path.split(fn)\n name, ext = os.path.splitext(fn)\n return path, name, ext\n\ndef get_rgb_ins_params(param_file):\n '''\n read the rgb intrinsic parameters file\n :param param_file: path to intrinsic parameters file\n :return:\n rgb_ins_params: a libfreenect2 ColorCameraParams object\n '''\n with open(param_file, 'r') as f:\n rgb_ins_params = [float(line.strip()) for line in f if line]\n\n rgb_camera_params_obj = {\n \"fx\" : rgb_ins_params[0],\n \"fy\" : rgb_ins_params[1],\n \"cx\" : rgb_ins_params[2],\n \"cy\" : rgb_ins_params[3],\n\n \"shift_d\" : rgb_ins_params[4],\n \"shift_m\" : rgb_ins_params[5],\n \"mx_x3y0\" : rgb_ins_params[6],\n \"mx_x0y3\" : rgb_ins_params[7],\n \"mx_x2y1\" : rgb_ins_params[8],\n \"mx_x1y2\" : rgb_ins_params[9],\n \"mx_x2y0\" : rgb_ins_params[10],\n \"mx_x0y2\" : rgb_ins_params[11],\n \"mx_x1y1\" : rgb_ins_params[12],\n \"mx_x1y0\" : rgb_ins_params[13],\n \"mx_x0y1\" : rgb_ins_params[14],\n \"mx_x0y0\" : rgb_ins_params[15],\n\n \"my_x3y0\" : rgb_ins_params[16],\n \"my_x0y3\" : rgb_ins_params[17],\n \"my_x2y1\" : rgb_ins_params[18],\n \"my_x1y2\" : rgb_ins_params[19],\n \"my_x2y0\" : rgb_ins_params[20],\n \"my_x0y2\" : rgb_ins_params[21],\n \"my_x1y1\" : rgb_ins_params[22],\n \"my_x1y0\" : rgb_ins_params[23],\n \"my_x0y1\" : rgb_ins_params[24],\n \"my_x0y0\" : rgb_ins_params[25]\n }\n return rgb_camera_params_obj\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_dir', type=str, default='/home/djcam/Documents/HDD/datasets/ikea/ikea_asm/',\n help='directory of the IKEA assembly dataset')\n parser.add_argument('--square_size', type=float, default=4.0,\n help='calibration chessboard square size (in centimetres)')\n parser.add_argument('--num_threads', type=int, default=4,\n help='number of threads for chessboard function')\n parser.add_argument('--debug_dir', type=str, default='',\n help='path for debug chessboard images')\n args = parser.parse_args()\n\n get_extrinsic_parameters(args)", "sub_path": "human_pose/calibration.py", "file_name": "calibration.py", "file_ext": "py", "file_size_in_byte": 10409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, 
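A usage sketch for the camera_parameters.json files written above (helper names here are hypothetical; the R/T convention is the one returned by cv.stereoCalibrate, with dev2 as the reference camera):

import json
import numpy as np

def load_camera_parameters(path):
    # Reload the serialized parameters, restoring numpy arrays from lists.
    with open(path) as f:
        return {k: np.asarray(v) for k, v in json.load(f).items()}

def dev2_point_to_camera(point_dev2, params):
    # X_cam = R @ X_dev2 + T maps a 3D point from the dev2 frame into the
    # frame of the camera described by params (identity for dev2 itself).
    x = np.asarray(point_dev2, dtype=float).reshape(3, 1)
    return params["R"] @ x + params["T"]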
"usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.indices", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.findChessboardCorners", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_COUNT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.cornerSubPix", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.drawChessboardCorners", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 74, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.CALIB_USE_INTRINSIC_GUESS", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_PRINCIPAL_POINT", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_ASPECT_RATIO", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_ZERO_TANGENT_DIST", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K1", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K2", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K3", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K4", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K5", 
"line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K6", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.calibrateCamera", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.getOptimalNewCameraMatrix", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.CALIB_FIX_INTRINSIC", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.stereoCalibrate", "line_number": 131, "usage_type": "call"}, {"api_name": "cv2.stereoCalibrate", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "580133782", "text": "# Inspired by http://stackoverflow.com/questions/4160175/detect-tap-with-pyaudio-from-live-mic\n# Adjusted by Rotem Hemo\n\nimport pyaudio\nimport struct\nimport math\n\nfrom audio_recorder import Record\n\n\nclass Segmenter:\n\tdef __init__(self, block_size_in_sec=0.02, rate=44100, channels=1, audio_format=pyaudio.paInt16, tap_threshold=0.010):\n\t\tself.recoder = Record()\n\t\tself.audio_stream = self.recoder.get_stream()\n\n\t\tself.block_size_in_sec = block_size_in_sec\n\t\tself.channels = 
channels\n\t\tself.rate = rate\n\t\tself.frames_per_block = int(self.rate * self.block_size_in_sec)\n\t\tself.audio_foramt = audio_format\n\n\t\tself.tap_threshold = tap_threshold\n\n\t\tself.max_tap_block = 0.15 / self.block_size_in_sec\n\t\tself.noisy_count = self.max_tap_block + 1\n\t\tself.quiet_count = 0\n\n\t\tself.over_sensitive = 15.0 / self.block_size_in_sec\n\t\tself.under_sensitive = 120.0 / self.block_size_in_sec\n\n\tdef close_strem(self):\n\t\tself.audio_stream.close()\n\n\t@staticmethod\n\tdef rms(data_block):\n\t\tsamples_count = len(data_block) / 2\n\t\tshort_normalize = (1.0 / 32768.0)\n\t\tstring_format = \"%dh\" % samples_count\n\t\tshorts = struct.unpack(string_format, data_block)\n\n\t\tsum_squares = 0.0\n\t\tfor sample in shorts:\n\t\t\tn = sample * short_normalize\n\t\t\tsum_squares += n * n\n\n\t\treturn math.sqrt(sum_squares / samples_count)\n\n\tdef grab_and_detect(self, callback):\n\n\t\t# grab a block of sound\n\t\tblock = self.audio_stream.read(self.frames_per_block)\n\n\t\t# get rms\n\t\tamplitude = self.rms(block)\n\n\t\tif amplitude > self.tap_threshold:\n\t\t\t# noisy block\n\t\t\tself.quiet_count = 0\n\t\t\tself.noisy_count += 1\n\t\t\tif self.noisy_count > self.over_sensitive:\n\t\t\t\t# turn down the sensitivity\n\t\t\t\tself.tap_threshold *= 1.1\n\t\telse:\n\t\t\t# quiet block.\n\n\t\t\tif 1 <= self.noisy_count <= self.max_tap_block:\n\t\t\t\tcallback()\n\n\t\t\tself.noisy_count = 0\n\n\t\t\tself.quiet_count += 1\n\t\t\tif self.quiet_count > self.under_sensitive:\n\t\t\t\t# turn up the sensitivity\n\t\t\t\tself.tap_threshold *= 0.9\n\n\n\n\n\n", "sub_path": "code/project/detection/segmenter.py", "file_name": "segmenter.py", "file_ext": "py", "file_size_in_byte": 1913, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pyaudio.paInt16", "line_number": 12, "usage_type": "attribute"}, {"api_name": "audio_recorder.Record", "line_number": 13, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 39, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "165893718", "text": "# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.12-x86_64/egg/dicom_tools/rescale.py\n# Compiled at: 2018-05-21 04:28:19\n# Size of source mod 2**32: 1918 bytes\nfrom __future__ import print_function\nimport numpy as np\nfrom skimage import exposure\nfrom skimage import img_as_ubyte, img_as_uint\n\ndef rescale16bit(imgIn, verbose=False):\n if imgIn.min() < 0:\n imgIn += abs(imgIn.min())\n imgOut = exposure.rescale_intensity(imgIn, in_range='uint16', out_range='uint16')\n if imgOut.min() < 0:\n print('rescale16bit: WARNING imgOut has negative value')\n imgOut = imgOut.astype(np.uint16)\n out = img_as_uint(imgOut)\n if verbose:\n print('rescale16bit')\n print('type(image) ', type(out))\n print('type(image[0][0]) ', type(out[0][0]))\n return out\n\n\ndef rescale8bit(imgIn, verbose=False):\n if imgIn.min() < 0:\n imgIn += abs(imgIn.min())\n imgOut = exposure.rescale_intensity(imgIn, in_range='uint16', out_range='uint8')\n if imgOut.min() < 0:\n print('rescale8bit: WARNING imgOut has negative value')\n imgOut = imgOut.astype(np.uint8)\n out = img_as_ubyte(imgOut)\n if verbose:\n print('rescale8bit')\n print('type(image) ', type(out))\n print('type(image[0][0]) ', type(out[0][0]))\n return out", 
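A quick usage sketch for the rescale helpers above (synthetic input; shapes and values are illustrative only):

import numpy as np

# A small frame with negative values, to exercise the min-shift branch.
frame = np.random.randint(-1000, 1000, size=(4, 4)).astype(np.int32)
out8 = rescale8bit(frame.copy(), verbose=True)    # defined above
assert out8.dtype == np.uint8
out16 = rescale16bit(frame.copy(), verbose=True)
assert out16.dtype == np.uint16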
"sub_path": "pycfiles/dicom_upload-0.1.2-py2.py3-none-any/rescale.cpython-37.py", "file_name": "rescale.cpython-37.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "skimage.exposure.rescale_intensity", "line_number": 15, "usage_type": "call"}, {"api_name": "skimage.exposure", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 18, "usage_type": "attribute"}, {"api_name": "skimage.img_as_uint", "line_number": 19, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 30, "usage_type": "call"}, {"api_name": "skimage.exposure", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "189827380", "text": "import os\nimport os.path\nimport jieba\nimport codecs\nimport pickle\n\nclass ProcessDocument:\n\tdef __init__(self,fileRoot,stopwordFile):\n\t\tif os.path.isdir(fileRoot) and os.path.exists(stopwordFile):\n\t\t\tself.fileRoot = fileRoot\n\t\t\tself.stopwordFile = stopwordFile\n\t\telse:\n\t\t\traise Exception(\"fileRoot is not a folder or soopwordFile wrong!\")\n\t\n\tdef __work4ConWordVec(self,dirpath,filename,documents):\n\t\twith codecs.open(os.path.join(dirpath,filename),\"r\",encoding=\"gbk\") as fr:\n\t\t\t[documents.append(line.strip()) for line in fr if not line.isspace() and line.find(\"content>\")==-1]\n\n\n\tdef constructWordVectorAndVocabulary(self):\n\t\tposDocuments=[]\n\t\tnegDocuments=[]\n\t\t[[self.__work4ConWordVec(dirpath,filename,posDocuments) if dirpath.endswith(\"pos\") else self.__work4ConWordVec(dirpath,filename,negDocuments) if dirpath.endswith(\"neg\") else none for filename in filenames]for dirpath,dirnames,filenames in os.walk(self.fileRoot)]\n\t\tself.documents = posDocuments + negDocuments\n\n\n\t\twith codecs.open(self.stopwordFile,\"r\",\"utf-8\") as fr:\n\t\t\tself.stopwords = [line.strip() for line in fr if not line.isspace()]\n\n\t\t\n\t\tposwords=[(list(jieba.cut(document))) for document in posDocuments]\n\t\tnegwords=[(list(jieba.cut(document))) for document in negDocuments]\n\t\tself.words = poswords + negwords\n\n\n\n\t\twordstemp=[]\n\t\t[[wordstemp.append(word) for word in posword]for posword in poswords]\n\t\t[[wordstemp.append(word) for word in negword]for negword in negwords]\n\t\tself.vocabulary=list(set(wordstemp))\n\n\t\tself.boundary = (len(posDocuments),len(self.documents)) \n\n\n\tdef pickleSomething(self,pickleFileRoot):\n\t\tif not hasattr(self,'vocabulary'):\n\t\t\tself.constructWordVectorAndVocabulary()\n\t\twith open(pickleFileRoot,\"wb\") as fwb:\n\t\t\tpickle.dump(self,fwb)\n\n\n\ndef test1():\n\tobj = ProcessDocument(\"./corpus/all1\",\"./corpus/ChnStopList.txt\")\n\tobj.constructWordVectorAndVocabulary()\n\tprint(\"len(obj.documents): \",len(obj.documents))\n\tprint(\"obj.doucments[0]: \",obj.documents[0])\n\tprint(\"len(obj.words): \",len(obj.words))\n\tprint(\"obj.words[0] length: \",len(obj.words[0]),\" obj.words[0]: \",obj.words[0])\n\tprint(\"obj.words[1] length: \",len(obj.words[1]),\" obj.words[1]: \",obj.words[1])\n\tprint(\"self.boundary: \",obj.boundary)\n\tprint(\"len(obj.vocabulary): \",len(obj.vocabulary))\n\tprint(\"pos before neg and obj.documents[0]=obj.posDocuments[0]: \",obj.documents[0])\n\tprint(\"neg after pos and obj.doucment[boundary]=obj.negDocuments[0]: 
\",obj.documents[obj.boundary[0]])\n\tprint(\"obj.vocabulary: \",obj.vocabulary)\n\tprint(\"obj.vocabulary[非常]: \",obj.vocabulary.count(\"非常\"))\n\ttemp=[]\n\t[[temp.append(1) if word==\"非常\" else None for word in wordstemp] for wordstemp in obj.words]\n\tprint(\"非常 in all documents nums : \",len(temp))\n\n\n\t# print(\"len(obj.posDocuments)\",len(obj.posDocuments),\" len(obj.negDocuments)\",len(obj.negDocuments))\n\t# print(\"posDocuments[0]: \",obj.posDocuments[0])\n\t# print(\"negDocuments[0]: \",obj.negDocuments[0])\n\t# print(\"len(obj.poswords): \",len(obj.poswords),\" len(obj.poswords): \",len(obj.negwords))\n\t# print(\"poswords[0]: \",obj.poswords[0])\n\t# print(\"negwords[0]: \",obj.negwords[0])\n\t# print(\"len(vocabulary): \",len(obj.vocabulary))\n\t# print(obj.vocabulary)\n\ndef test():\n\tobj = ProcessDocument(\"./corpus/train\",\"./corpus/ChnStopList.txt\")\n\tobj.constructWordVectorAndVocabulary()\n\tobj.pickleSomething(\"./process.out\")\n\nif __name__=='__main__':\n\ttest()", "sub_path": "lda_python/sentimentProcess.py", "file_name": "sentimentProcess.py", "file_ext": "py", "file_size_in_byte": 3365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.isdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 23, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 27, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 31, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "357372022", "text": "from PyQt4 import QtGui, QtCore\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass WinMain(QtGui.QMainWindow):\n\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n\n self.resize(1500, 900)\n self.setWindowTitle(\"Reserver v0.1\")\n\n self.__MDI_area = QtGui.QMdiArea()\n self.setCentralWidget(self.__MDI_area)\n\n self.__setupMenubar()\n\n self.__databasesRoot = \"/Users/matthewjbelcher/Documents/Python/db\"\n self.__openMDI_subWindows = {}\n\n self.__projectStartDate = None\n self.__projectEndDate_o = None\n self.__projectEndDate_d = None\n self.__project_oLength = None\n self.__project_dLength = None\n\n self.show()\n\n def __setupMenubar(self):\n\n self.__menubar = self.menuBar()\n self.__menus = {}\n\n # File menu\n\n self.__menus[\"file\"] = self.__menubar.addMenu(\"&File\")\n\n # View\n\n self.__menus[\"view\"] = self.__menubar.addMenu(\"&View\")\n\n qAction_showProjectExplorer = QAction_ShowProjectExplorer(\"&Show project explorer\", self)\n self.__menus[\"view\"].addAction(qAction_showProjectExplorer)\n\n qAction_showImportExplorer = QAction_ShowImportExplorer(\"&Import data\", self)\n self.__menus[\"view\"].addAction(qAction_showImportExplorer)\n\n qAction_showReservingClassExplorer = QAction_ShowReservingClassExplorer(\"&Show reserving class types\", self)\n self.__menus['view'].addAction(qAction_showReservingClassExplorer)\n\n def classifyNewSubWindow(self, name, subWindow, type_):\n self.__openMDI_subWindows[name] = {\"subWindow\": 
subWindow,\n \"type\": type_}\n\n def declassifySubWindow(self, name):\n del self.__openMDI_subWindows[name]\n\n def addRClassTypeMenu(self, window):\n self.__menus['rClassTypes'] = self.__menubar.addMenu('&Reserving class types')\n\n qAction_addRClassType = QAction_AddRClassType('&Add type', self, window)\n self.__menus['rClassTypes'].addAction(qAction_addRClassType)\n\n qAction_removeRClassType = QAction_RemoveRClassType('&Remove selected type', self, window)\n self.__menus['rClassTypes'].addAction(qAction_removeRClassType)\n\n def removeRClassTypeMenu(self):\n self.__menubar.removeAction(self.__menus['rClassTypes'].menuAction())\n\n @property\n def openMDI_subWindows(self):\n return self.__openMDI_subWindows\n\n @property\n def MDI_area(self):\n return self.__MDI_area\n\n @property\n def databasesRoot(self):\n return self.__databasesRoot\n\n @property\n def projectStartDate(self):\n return self.__projectStartDate\n\n @projectStartDate.setter\n def projectStartDate(self, value):\n if isinstance(value, str):\n self.__projectStartDate = datetime.datetime.strptime(value, \"%Y-%m-%d\").date()\n else:\n self.__projectStartDate = value\n\n @property\n def projectEndDate_o(self):\n return self.__projectEndDate_o\n\n @projectEndDate_o.setter\n def projectEndDate_o(self, value):\n if isinstance(value, str):\n self.__projectEndDate_o = datetime.datetime.strptime(value, \"%Y-%m-%d\").date()\n else:\n self.__projectEndDate_o = value\n\n @property\n def projectEndDate_d(self):\n return self.__projectEndDate_d\n\n @projectEndDate_d.setter\n def projectEndDate_d(self, value):\n if isinstance(value, str):\n self.__projectEndDate_d = datetime.datetime.strptime(value, \"%Y-%m-%d\").date()\n else:\n self.__projectEndDate_d = value\n\n @property\n def project_oLength(self):\n return self.__project_oLength\n\n @project_oLength.setter\n def project_oLength(self, value):\n self.__project_oLength = value\n\n @property\n def project_dLength(self):\n return self.__project_dLength\n\n @project_dLength.setter\n def project_dLength(self, value):\n self.__project_dLength = value\n\n def oCount(self, oLength=None):\n if oLength is None:\n oLength = self.__project_oLength\n\n monthCount = (self.__projectEndDate_o.year - self.__projectStartDate.year) * 12 + \\\n (self.__projectEndDate_o.month - self.__projectStartDate.month) + 1\n oCount = (monthCount - 1) // oLength + 1\n\n return oCount\n\n def dCount(self, dLength=None):\n if dLength is None:\n dLength = self.__project_dLength\n\n monthCount = (self.__projectEndDate_d.year - self.__projectStartDate.year) * 12 + \\\n (self.__projectEndDate_d.month - self.__projectStartDate.month) + 1\n dCount = (monthCount - 1) // dLength + 1\n\n return dCount\n\n def oHeaders(self, oLength=0):\n if oLength == 0:\n oLength = self.__project_oLength\n\n oCount = self.oCount(oLength)\n\n oPeriods = [self.__projectStartDate + relativedelta(months=o * oLength) for o in range(oCount)]\n\n if oLength == 1:\n labels = [oPeriod.strftime(\"%b%y\") for oPeriod in oPeriods]\n elif oLength == 3:\n labels = [str(oPeriod.year) + \" Q\" + str((oPeriod.month - 1) // 3 + 1) for oPeriod in oPeriods]\n elif oLength == 6:\n labels = [str(oPeriod.year) + \" H\" + str((oPeriod.month - 1) // 6 + 1) for oPeriod in oPeriods]\n elif oLength == 12:\n labels = [oPeriod.year for oPeriod in oPeriods]\n else:\n oPeriods.append(self.__projectStartDate + relativedelta(months=oCount * oLength))\n labels = [oPeriods[o].strftime(\"%b%y\") + \"-\" + (oPeriods[o + 1] - relativedelta(days=1)).strftime(\"%b%y\")\n for o in 
range(oCount)]\n\n return labels\n\n def dHeaders(self, dLength=0, basis=\"Development\"):\n if dLength == 0:\n dLength = self.__project_dLength\n\n dCount = self.dCount(dLength)\n project_dCount = self.dCount()\n\n labels = []\n\n if basis == \"Development\":\n labels = [project_dCount - d * dLength for d in range(dCount)]\n labels.reverse()\n\n elif basis == \"Calendar\":\n labels = [(self.__projectEndDate_d - relativedelta(months=d)).strftime(\"%b%y\") for d in range(dCount)]\n labels.reverse()\n\n return labels\n\n\nclass QAction_ShowProjectExplorer(QtGui.QAction):\n\n def __init__(self, caption, parent):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n\n self.triggered.connect(self.__showProjectExplorer)\n\n def __showProjectExplorer(self):\n projectExplorer = self.__parent.openMDI_subWindows[\"Project Explorer\"]\n self.__parent.MDI_area.setActiveSubWindow(projectExplorer[\"subWindow\"].MDI_subWindow)\n\n projectExplorer[\"subWindow\"].MDI_subWindow.setWindowState(QtCore.Qt.WindowMaximized)\n\n\nclass QAction_ShowImportExplorer(QtGui.QAction):\n\n def __init__(self, caption, parent):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n\n self.triggered.connect(self.__showImportExplorer)\n\n def __showImportExplorer(self):\n try:\n self.__parent.openMDI_subWindows[\"Import Explorer\"]\n except KeyError:\n import mImporter\n projectPath = self.__parent.openMDI_subWindows[\"Project Explorer\"][\"subWindow\"].projectPath\n mImporter.SubWinImportExplorer(projectPath=projectPath, parent=self.parent())\n\n\nclass QAction_ShowReservingClassExplorer(QtGui.QAction):\n\n def __init__(self, caption, parent):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n\n self.triggered.connect(self.__showReservingClassExplorer)\n\n def __showReservingClassExplorer(self):\n try:\n self.__parent.openMDI_subWindows['Reserving class types']\n except KeyError:\n import mReservingClasses\n projectPath = self.__parent.openMDI_subWindows['Project Explorer']['subWindow'].projectPath\n mReservingClasses.SubWinReservingClassExplorer(projectPath=projectPath, parent=self.parent())\n\n\nclass QAction_AddRClassType(QtGui.QAction):\n\n def __init__(self, caption, parent, window):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n self.__window = window\n\n self.triggered.connect(self.__addRClassType)\n\n def __addRClassType(self):\n import mReservingClasses\n mReservingClasses.WinAddOrEditReservingClassType(parent=self.__window)\n\n\nclass QAction_RemoveRClassType(QtGui.QAction):\n\n def __init__(self, caption, parent, window):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n self.__window = window\n\n self.triggered.connect(self.__removeRClassType)\n\n def __removeRClassType(self):\n self.__window.removeRClassType()\n\n\nclass MDI_SubWindow(QtGui.QWidget):\n \"\"\" Base class for MDI sub-windows \"\"\"\n\n def __init__(self, MDI_area):\n QtGui.QWidget.__init__(self)\n\n self.__MDI_area = MDI_area\n self.__mainWindow = self.__MDI_area.parent()\n\n self.__setupWindow()\n\n def __setupWindow(self):\n\n self.__MDI_subWindow = self.__MDI_area.addSubWindow(self)\n self.__MDI_subWindow.resize(600, 600)\n\n self.__MDI_subWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n\n self.__grid = QtGui.QGridLayout()\n self.__grid.setSpacing(0)\n self.__grid.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.__grid)\n\n @property\n def MDI_area(self):\n return self.__MDI_area\n\n @property\n def 
mainWindow(self):\n return self.__mainWindow\n\n @property\n def MDI_subWindow(self):\n return self.__MDI_subWindow\n\n @property\n def grid(self):\n return self.__grid\n", "sub_path": "mWindows.py", "file_name": "mWindows.py", "file_ext": "py", "file_size_in_byte": 9774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PyQt4.QtGui.QMainWindow", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMainWindow.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMainWindow", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMdiArea", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 90, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 158, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 169, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 170, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 189, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 195, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 195, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 198, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 198, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 208, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 208, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 211, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 211, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 214, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 214, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 214, "usage_type": "name"}, {"api_name": "mImporter.SubWinImportExplorer", "line_number": 226, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 229, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 229, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 232, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 232, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 232, "usage_type": "name"}, {"api_name": "mReservingClasses.SubWinReservingClassExplorer", "line_number": 244, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 247, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 247, "usage_type": "name"}, {"api_name": 
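A worked example of the month-bucketing arithmetic used by oCount/dCount above (dates illustrative): with projectStartDate = 2014-01-01 and projectEndDate_o = 2014-11-30, monthCount = (2014 - 2014) * 12 + (11 - 1) + 1 = 11; with oLength = 3 (quarterly buckets) that gives oCount = (11 - 1) // 3 + 1 = 4 origin periods.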
"PyQt4.QtGui.QAction.__init__", "line_number": 250, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 250, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 250, "usage_type": "name"}, {"api_name": "mReservingClasses.WinAddOrEditReservingClassType", "line_number": 259, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 262, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 262, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 265, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 265, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 265, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 276, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 276, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget.__init__", "line_number": 280, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 280, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 280, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 292, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 292, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 294, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 294, "usage_type": "name"}]} +{"seq_id": "275545559", "text": "import matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.artist import setp\nimport pandas.core.common as com\nfrom pandas.compat import range, lrange, lmap, map, zip\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nimport pandas as pd \nimport numpy as np \n\n\"\"\"\nThis module provides helper methods to carry out data distribution\nanalysis on flight data found on https://www.kaggle.com/usdot/flight-delays.\n\nThese methods are specific to the flight dataset and is not meant to be \ngeneric functions for other datasets.\n\"\"\"\n\ndef scatter_matrix_all(frame, alpha=0.5, figsize=None, grid=False, diagonal='hist', marker='.', density_kwds=None, hist_kwds=None, range_padding=0.05, **kwds):\n \n df = frame\n num_cols = frame._get_numeric_data().columns.values\n n = df.columns.size\n fig, axes = plt.subplots(nrows=n, ncols=n, figsize=figsize, squeeze=False)\n\n # no gaps between subplots\n fig.subplots_adjust(wspace=0, hspace=0)\n\n mask = com.notnull(df)\n marker = _get_marker_compat(marker)\n\n hist_kwds = hist_kwds or {}\n density_kwds = density_kwds or {}\n\n # workaround because `c='b'` is hardcoded in matplotlibs scatter method\n kwds.setdefault('c', plt.rcParams['patch.facecolor'])\n\n boundaries_list = []\n for a in df.columns:\n if a in num_cols:\n values = df[a].values[mask[a].values]\n else:\n values = df[a].value_counts()\n rmin_, rmax_ = np.min(values), np.max(values)\n rdelta_ext = (rmax_ - rmin_) * range_padding / 2.\n boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))\n\n for i, a in zip(lrange(n), df.columns):\n for j, b in zip(lrange(n), df.columns):\n ax = axes[i, j]\n\n if i == j:\n if a in num_cols: # numerical variable\n values = df[a].values[mask[a].values]\n # Deal with the diagonal by drawing a histogram there.\n if diagonal == 'hist':\n ax.hist(values, **hist_kwds)\n elif diagonal in ('kde', 'density'):\n from scipy.stats import 
gaussian_kde\n y = values\n gkde = gaussian_kde(y)\n ind = np.linspace(y.min(), y.max(), 1000)\n ax.plot(ind, gkde.evaluate(ind), **density_kwds)\n ax.set_xlim(boundaries_list[i])\n else: # categorical variable\n values = df[a].value_counts()\n ax.bar(list(range(df[a].nunique())), values)\n else:\n common = (mask[a] & mask[b]).values\n # two numerical variables\n if a in num_cols and b in num_cols:\n if i > j:\n ax.scatter(df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds)\n # The following 2 lines add the lowess smoothing\n ys = lowess(df[a][common], df[b][common])\n ax.plot(ys[:,0], ys[:,1], 'red')\n else:\n pearR = df[[a, b]].corr()\n ax.text(df[b].min(), df[a].min(), 'r = %.4f' % (pearR.iloc[0][1]))\n ax.set_xlim(boundaries_list[j])\n ax.set_ylim(boundaries_list[i])\n # two categorical variables\n elif a not in num_cols and b not in num_cols:\n if i > j:\n from statsmodels.graphics import mosaicplot\n mosaicplot.mosaic(df, [b, a], ax, labelizer=lambda k:'')\n # one numerical variable and one categorical variable\n else:\n if i > j:\n tol = pd.DataFrame(df[[a, b]])\n if a in num_cols:\n label = [ k for k, v in tol.groupby(b) ]\n values = [ v[a].tolist() for k, v in tol.groupby(b) ]\n ax.boxplot(values, labels=label)\n else:\n label = [ k for k, v in tol.groupby(a) ]\n values = [ v[b].tolist() for k, v in tol.groupby(a) ]\n ax.boxplot(values, labels=label, vert=False)\n\n ax.set_xlabel('')\n ax.set_ylabel('')\n\n _label_axis(ax, kind='x', label=b, position='bottom', rotate=True)\n _label_axis(ax, kind='y', label=a, position='left')\n\n if j!= 0:\n ax.yaxis.set_visible(False)\n if i != n-1:\n ax.xaxis.set_visible(False)\n\n for ax in axes.flat:\n setp(ax.get_xticklabels(), fontsize=8)\n setp(ax.get_yticklabels(), fontsize=8)\n return fig\n \n\ndef _label_axis(ax, kind='x', label='', position='top', ticks=True, rotate=False):\n from matplotlib.artist import setp\n if kind == 'x':\n ax.set_xlabel(label, visible=True)\n ax.xaxis.set_visible(True)\n ax.xaxis.set_ticks_position(position)\n ax.xaxis.set_label_position(position)\n if rotate:\n setp(ax.get_xticklabels(), rotation=90)\n elif kind == 'y':\n ax.yaxis.set_visible(True)\n ax.set_ylabel(label, visible=True)\n #ax.set_ylabel(a)\n ax.yaxis.set_ticks_position(position)\n ax.yaxis.set_label_position(position)\n return\n\ndef _get_marker_compat(marker):\n import matplotlib.lines as mlines\n import matplotlib as mpl\n if mpl.__version__ < '1.1.0' and marker == '.':\n return 'o'\n if marker not in mlines.lineMarkers:\n return 'o'\n return marker\n\ndef plotBarPercentage(data, groupAttr, dependencyAttr, axAttr, condition, filter=0):\n totaldf = data.groupby([groupAttr])[dependencyAttr].count()\n denomdf = data.loc[condition]\n denomdf = denomdf.groupby([groupAttr])[dependencyAttr].count()\n df = denomdf/totaldf*100\n df = df[df > filter]\n if len(df) > 0:\n ax = df.plot.bar(figsize=(14, 6), ax = axAttr)\n ax.set_title(dependencyAttr)\n ax.set_ylabel('Percentage')\n\ndef plotBar(data, groupAttr, dependencyAttr, axAttr, condition):\n df = data.loc[condition]\n df = df.groupby([groupAttr])[dependencyAttr].count()\n ax = df.plot.bar(figsize=(14, 6), ax = axAttr)\n ax.set_ylabel(dependencyAttr)\n\ndef plotBars(data, groupAttr, dependencyAttrs, rows, cols, conditions):\n fig, axes = plt.subplots(nrows=rows, ncols=cols)\n r = 0\n c = 0\n for i in range(len(dependencyAttrs)):\n plotBar(data, groupAttr, dependencyAttrs[i], axes[r,c], conditions[i])\n if c == cols-1:\n c = -1\n r = r + 1\n c = c + 1\n \ndef plotBarsPercentage(data, 
groupAttr, dependencyAttrs, rows, cols, conditions, filter = 0):\n fig, axes = plt.subplots(nrows=rows, ncols=cols)\n r = 0\n c = 0\n for i in range(len(dependencyAttrs)):\n if rows > 1:\n plotBarPercentage(data, groupAttr, dependencyAttrs[i], axes[r,c], conditions[i], filter)\n else:\n plotBarPercentage(data, groupAttr, dependencyAttrs[i], axes[c], conditions[i], filter)\n\n if c == cols-1:\n c = -1\n r = r + 1\n c = c + 1\n\ndef plotMapData(df, longAttr, latAttr, valAttr, figw=8, figh=8, initmarksize= 0.5):\n # setup Lambert Conformal basemap.\n plt.figure(figsize=(figw,figh))\n m = Basemap(width=12000000,height=9000000,projection='lcc',\n resolution='c',lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.)\n # draw a boundary around the map, fill the background.\n # this background will end up being the ocean color, since\n # the continents will be drawn on top.\n m.drawmapboundary(fill_color='aqua')\n # fill continents, set lake color same as ocean color.\n m.fillcontinents(color='coral',lake_color='aqua')\n # draw parallels and meridians.\n # label parallels on right and top\n # meridians on bottom and left\n parallels = np.arange(0.,81,10.)\n # labels = [left,right,top,bottom]\n m.drawparallels(parallels,labels=[False,True,True,False])\n meridians = np.arange(10.,351.,20.)\n m.drawmeridians(meridians,labels=[True,False,False,True])\n # plot blue dot on Boulder, colorado and label it as such.\n\n for lon, lat, mag in zip(df[longAttr].values, df[latAttr].values, df[valAttr].values):\n xpt,ypt = m(lon, lat)\n lonpt, latpt = m(xpt,ypt,inverse=True)\n msize = mag * initmarksize\n #map.plot(x, y, marker_string, markersize=msize)\n m.plot(xpt,ypt,'bo', markersize=msize) # plot a blue dot there \n\n plt.show()\n\ndef plotJointPlotSplice0_10_240_By(x, y, delayAttr, data):\n # Create dataset based on splice conditions\n flights_greater_than_0_and_less_than_10 = data.loc[\n (data[delayAttr] > 0)\n & (data[delayAttr] <= 10)\n ]\n flights_greater_than_10_and_less_than_240 = data.loc[\n (data[delayAttr] > 10)\n & (data[delayAttr] <= 240)\n ]\n\n flights_greater_than_240 = data.loc[\n (data[delayAttr] > 240)\n ]\n sns.jointplot(x=x, y=y, kind=\"kde\", data=flights_greater_than_0_and_less_than_10, size=4)\n sns.jointplot(x=x, y=y, kind=\"kde\", data=flights_greater_than_10_and_less_than_240, size=4)\n sns.jointplot(x=x, y=y, kind=\"kde\", data=flights_greater_than_240, size=4)\n\ndef plotJointPlot(x, y, delayAttr, data, title):\n df = data\n datasetSize = len(df)\n g = sns.jointplot(x=x, y=y, kind=\"kde\", data=df, size=4)\n txt = plt.title(title + \",\\n Dataset Size = \" + str(datasetSize), fontsize = 24, y = 0.5, x = 6)\n \ndef plotJointPlotSplice(x, y, delayAttr, data, cond, title):\n df = data.loc[cond]\n datasetSize = len(df)\n g = sns.jointplot(x=x, y=y, kind=\"kde\", data=df, size=4)\n txt = plt.title(title + \",\\n Dataset Size = \" + str(datasetSize), fontsize = 24, y = 0.5, x = 6)\n \ndef generateDistributionDF(data, timeAttr, monthAttr, delayAttr, aggfunc= np.sum):\n pivot = pd.pivot_table(data,index=[monthAttr, timeAttr],values=[delayAttr],aggfunc=aggfunc)\n pivot.reset_index(level=0, inplace=True)\n pivot.reset_index(level=0, inplace=True)\n return pivot\n\ndef plot3D(data, x, y, z):\n distdf = generateDistributionDF(data, y, x, z)\n distdf_avg = generateDistributionDF(data, y, x, z, np.mean) \n\n fig = plt.figure(figsize=(16, 6), dpi=80)\n\n #---- First subplot\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n\n surf = ax.plot_trisurf(distdf[x], distdf[y], distdf[z], cmap=plt.cm.jet, 
linewidth=0.03)\n fig.colorbar(surf)\n\n #---- Second subplot\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n surf = ax.plot_trisurf(distdf_avg[x], distdf_avg[y], distdf_avg[z], cmap=plt.cm.jet, linewidth=0.03)\n fig.colorbar(surf)\n\n plt.show() \n", "sub_path": "airport/chartlib.py", "file_name": "chartlib.py", "file_ext": "py", "file_size_in_byte": 10897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "seaborn.set_style", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "pandas.core.common.notnull", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.core.common", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.compat.zip", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.compat.lrange", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.compat.zip", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.compat.lrange", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.stats.gaussian_kde", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.compat.range", "line_number": 70, "usage_type": "call"}, {"api_name": "statsmodels.nonparametric.smoothers_lowess.lowess", "line_number": 78, "usage_type": "call"}, {"api_name": "statsmodels.graphics.mosaicplot.mosaic", "line_number": 89, "usage_type": "call"}, {"api_name": "statsmodels.graphics.mosaicplot", "line_number": 89, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.artist.setp", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.artist.setp", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.artist.setp", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.__version__", "line_number": 140, "usage_type": "attribute"}, {"api_name": "matplotlib.lines.lineMarkers", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.lines", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "pandas.compat.range", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "pandas.compat.range", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.compat.zip", "line_number": 210, "usage_type": "call"}, 
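A hypothetical invocation of the plot3D helper above (the DataFrame and its column names are illustrative, not taken from the Kaggle flight-delays schema):

import numpy as np
import pandas as pd

flights = pd.DataFrame({
    "MONTH": np.random.randint(1, 13, size=500),
    "HOUR": np.random.randint(0, 24, size=500),
    "DEPARTURE_DELAY": np.random.exponential(scale=10.0, size=500),
})
# plot3D pivots by (x, y) and draws two surfaces: total and mean of z.
plot3D(flights, "MONTH", "HOUR", "DEPARTURE_DELAY")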
{"api_name": "matplotlib.pyplot.show", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "seaborn.jointplot", "line_number": 233, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 234, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 235, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "seaborn.jointplot", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pandas.pivot_table", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 257, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 264, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 269, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}]} +{"seq_id": "645483251", "text": "from typing import List\n\n\"\"\"\n\nGiven the array queries of positive integers between 1 and m,\nyou have to process all queries[i] (from i=0 to i=queries.length-1)\naccording to the following rules:\n\nIn the beginning, you have the permutation P=[1,2,3,...,m].\nFor the current i, find the position of queries[i] in the permutation P\n(indexing from 0) and then move this at the beginning of the\npermutation P. Notice that the position of queries[i] in P\nis the result for queries[i].\nReturn an array containing the result for the given queries.\n\nExample :\nInput: queries = [3,1,2,1], m = 5\nOutput: [2,1,2,1]\nExplanation: The queries are processed as follow:\n\nFor i=0: queries[i]=3, P=[1,2,3,4,5], position of 3 in P is 2,\nthen we move 3 to the beginning of P resulting in P=[3,1,2,4,5].\n\nFor i=1: queries[i]=1, P=[3,1,2,4,5], position of 1 in P is 1,\nthen we move 1 to the beginning of P resulting in P=[1,3,2,4,5].\n\nFor i=2: queries[i]=2, P=[1,3,2,4,5], position of 2 in P is 2,\nthen we move 2 to the beginning of P resulting in P=[2,1,3,4,5].\n\nFor i=3: queries[i]=1, P=[2,1,3,4,5], position of 1 in P is 1,\nthen we move 1 to the beginning of P resulting in P=[1,2,3,4,5].\nTherefore, the array containing the result is [2,1,2,1].\n\nLink : https://leetcode.com/problems/queries-on-a-permutation-with-key/\n\n\"\"\"\n\n\n'''\nNaive approach\nThere are three things you need to know:\n1. Find index by element\n2. Delete by index .pop(index_position)\n3. 
Insert by index .insert(position,element)\n\nbut list is the incorrect DS to use in python because it is not optimized for\nmultiple insertions and deletions.\nLists are not optimized for modifications at the front,\nand somelist.insert(0, something) is an O(n) operation.\nAll of the above mentioned approaches are O(n)\n\npython tip :\nCheck out itertools count instead of using List.index\nsince list.index returns first occurance only\n'''\n\n\nclass solution():\n def process_queries(self, queries: List[int], m: int) -> List[int]:\n permutation = [element for element in range(1, m+1)]\n sol = []\n for index in range(0, len(queries)):\n value = queries[index]\n ele = permutation.index(value)\n sol.append(ele)\n permutation.pop(ele)\n permutation.insert(0, value)\n return sol\n\n\nobj = solution()\n# print(obj.process_queries([3, 1, 2, 1], 5))\n# print(obj.process_queries([4, 1, 2, 2], 4))\nprint(obj.process_queries([7, 5, 5, 8, 3], 8))\n\n\n'''\n\nhttps://cp-algorithms.com/data_structures/fenwick.html\n\n'''\n", "sub_path": "1409_queries_on_a_permutation_in_key.py", "file_name": "1409_queries_on_a_permutation_in_key.py", "file_ext": "py", "file_size_in_byte": 2520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "469749610", "text": "from dagster.core.instance import DagsterInstance\nfrom dagster.core.storage.event_log.sql_event_log import SqlEventLogStorage\n\n\ndef migrate_event_log_data(instance=None):\n '''\n Utility method to migrate the data in the existing event log records. Reads every event log row\n reachable from the instance and reserializes it to event log storage. Deserializing and then\n reserializing the event from storage allows for things like SQL column extraction, filling\n explicit default values, etc.\n '''\n if not instance:\n instance = DagsterInstance.get()\n\n event_log_storage = instance._event_storage # pylint: disable=protected-access\n if not isinstance(event_log_storage, SqlEventLogStorage):\n return\n\n for run in instance.get_runs():\n event_records_by_id = event_log_storage.get_logs_for_run_by_log_id(run.run_id)\n for record_id, event in event_records_by_id.items():\n event_log_storage.update_event_log_record(record_id, event)\n", "sub_path": "python_modules/dagster/dagster/core/storage/event_log/migration.py", "file_name": "migration.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dagster.core.instance.DagsterInstance.get", "line_number": 13, "usage_type": "call"}, {"api_name": "dagster.core.instance.DagsterInstance", "line_number": 13, "usage_type": "name"}, {"api_name": "dagster.core.storage.event_log.sql_event_log.SqlEventLogStorage", "line_number": 16, "usage_type": "argument"}]} +{"seq_id": "379040465", "text": "import json\nimport time\nfrom requests import get, post\nfrom config import FORM_RECOGNIZER_CONGIF\nfrom utilities.utilities import remove_file, decrypt_pdf\n\napim_key = FORM_RECOGNIZER_CONGIF['API_KEY']\npost_url = FORM_RECOGNIZER_CONGIF['ANALYZE_LAYOUT_ENDPOINT']\n\n\ndef init(filename, file_password, contentType='pdf', resultType=\"text\"):\n \"\"\"\n Initialize local form layout recognition process\n \"\"\"\n textResult = ''\n error_message = None\n if (contentType == 'pdf'):\n error, pdfFilename = decrypt_pdf(filename, password=file_password)\n filename = pdfFilename\n 
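# (Added) Referring back to the permutation-queries module above: a sketch of
# the O(log n) Fenwick-tree alternative its cp-algorithms link hints at; all
# names below are mine, not from the original source. Values start in slots
# len(queries)+1 .. len(queries)+m of a binary indexed tree, and each queried
# value is moved to a fresh slot just left of the current front.
class BIT:
    def __init__(self, n):
        self.n = n
        self.tree = [0] * (n + 1)

    def update(self, i, delta):
        while i <= self.n:
            self.tree[i] += delta
            i += i & (-i)

    def query(self, i):
        # Prefix sum over slots 1..i.
        s = 0
        while i > 0:
            s += self.tree[i]
            i -= i & (-i)
        return s

def process_queries_bit(queries, m):
    bit = BIT(len(queries) + m)
    pos = {v: len(queries) + v for v in range(1, m + 1)}
    for slot in pos.values():
        bit.update(slot, 1)
    front = len(queries)          # next free slot, moving leftwards
    result = []
    for q in queries:
        result.append(bit.query(pos[q]) - 1)   # elements strictly before q
        bit.update(pos[q], -1)
        bit.update(front, 1)
        pos[q] = front
        front -= 1
    return result

# process_queries_bit([3, 1, 2, 1], 5) -> [2, 1, 2, 1], matching the example.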
error_message = error\n\n    if not error_message:\n        x = 'application' if contentType == 'pdf' else 'image'\n        headers = {\n            # Request headers\n            'Content-Type': f'{x}/{contentType}',\n            'Ocp-Apim-Subscription-Key': apim_key,\n        }\n\n        with open(filename, \"rb\") as f:\n            data_bytes = f.read()\n\n        try:\n            resp = post(url=post_url, data=data_bytes, headers=headers)\n            if resp.status_code != 202:\n                # A failed POST has no operation-location header, so bail out early.\n                return f\"POST analyze failed:\\n{resp.text}\"\n\n            get_url = resp.headers[\"operation-location\"]\n            textResult = get_layout_results(get_url, resultType)\n            remove_file(filename)\n\n        except Exception as e:\n            textResult = f\"POST analyze failed:\\n{str(e)}\"\n    else:\n        remove_file(filename)\n        textResult = error_message\n\n    return textResult\n\n\ndef parse_text(json_result):\n    \"\"\"\n    Parse final result from json response\n    \"\"\"\n    textResult = ''\n    for result in json_result['analyzeResult']['readResults']:\n        # textResult += f\"***Page No. {result['page']}***\\n\"\n        for line in result['lines']:\n            textResult += line['text']\n            textResult += '\\n'\n        textResult += '\\n'\n\n    return textResult\n\n\ndef get_layout_results(get_url, resultType=\"text\"):\n    \"\"\"\n    Fetch requested form's layout results by using authorized token\n    \"\"\"\n    textResult = ''\n    n_tries = 10\n    n_try = 0\n    wait_sec = 5\n    stopProcess = False\n    while (n_try < n_tries and not(stopProcess)):\n        try:\n            resp = get(url=get_url, headers={\n                \"Ocp-Apim-Subscription-Key\": apim_key})\n            resp_json = json.loads(resp.text)\n            if resp.status_code != 200:\n                textResult = f\"GET Layout results failed:\\n{resp_json}\"\n                stopProcess = True\n                continue\n\n            status = resp_json[\"status\"]\n            if status == \"succeeded\":\n                if (resultType == \"text\"):\n                    textResult = parse_text(resp_json)\n                elif (resultType == \"json\"):\n                    textResult = str(resp_json)\n                stopProcess = True\n\n            elif status == \"failed\":\n                textResult = f\"Layout Analysis failed:\\n{resp_json}\"\n                stopProcess = True\n\n            else:\n                # Analysis still running. Wait and retry.\n                time.sleep(wait_sec)\n                n_try += 1\n\n        except Exception as e:\n            textResult = f\"GET analyze results failed:\\n{str(e)}\"\n            stopProcess = True\n    return textResult\n", "sub_path": "CognitiveAPI/process_forms/extract_local.py", "file_name": "extract_local.py", "file_ext": "py", "file_size_in_byte": 3168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "config.FORM_RECOGNIZER_CONGIF", "line_number": 7, "usage_type": "name"}, {"api_name": "config.FORM_RECOGNIZER_CONGIF", "line_number": 8, "usage_type": "name"}, {"api_name": "utilities.utilities.decrypt_pdf", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "utilities.utilities.remove_file", "line_number": 40, "usage_type": "call"}, {"api_name": "utilities.utilities.remove_file", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 77, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 79, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "424275303", "text": "import gym\nimport gym_stocks\nimport random\n\nenv = gym.make('Stocks-v0')\n# print(env.reset())\nenv.reset()\n\nfor i in range(10):\n\tprs = (random.randint(0,20)-10)/10\n\tdata,reward,done, _ = env.step(prs)\n\t# print(data)\n\tprint(\"act: {}, roi(reward): {}\".format(prs,reward))\n\tprint(\"---\")\n\t#print env.step(0)\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gym.make", "line_number": 5, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "574362305", "text": "#! 
/usr/bin/env python3\n#encoding: utf-8\n\nfrom datetime import datetime,timedelta\nfrom iotools import *\nfrom mytools import *\nfrom crawler import *\nfrom client import *\n\ndef search():\n\tprint(KWD)\n\tclient = TwitterClient()\n\tfw = JsonStorer('tw_search_%s' % KWD)\n\tdt,EndTm,inc = datetime(2014,11,30),datetime(2014,12,2),timedelta(1)\n\twhile dt < EndTm:\n\t\tst = dt.strftime('%Y-%m-%d')\n\t\tdt += inc\n\t\tet = dt.strftime('%Y-%m-%d')\n\t\tprint(st, et)\n\t\tquery = TW_QUERY.format((st, et))\n\t\turl = TW_SEARCH_URL.format(query)\n\t\treq =TwitterRequest(url)\n\t\treq.perform()\n\t\ttweets, cursor = client.parse_search(req)\n\t\twhile len(tweets)>0:\n\t\t\tprint(len(tweets), end=', ', flush=True)\n\t\t\tfw.write({'t':st, 'd':tweets})\n\t\t\turl = TW_SEARCH_SCROLL.format(query, cursor)\n\t\t\treq.set_url(url)\n\t\t\treq.perform()\n\t\t\ttweets, cursor = client.parse_search(req, False)\n\t\tprint()\n\tfw.close()\n\nif __name__=='__main__':\n\tsearch()\n\n\n", "sub_path": "nonparall.py", "file_name": "nonparall.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "client.parse_search", "line_number": 24, "usage_type": "call"}, {"api_name": "client.parse_search", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "574362305", "text": "import gym\nimport gym_stocks\nimport random\n\nenv = gym.make('Stocks-v0')\n# print(env.reset())\nenv.reset()\n\nfor i in range(10):\n\tprs = (random.randint(0,20)-10)/10\n\tdata,reward,done, _ = env.step(prs)\n\t# print(data)\n\tprint(\"act: {}, roi(reward): {}\".format(prs,reward))\n\tprint(\"---\")\n\t#print env.step(0)\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gym.make", "line_number": 5, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "29789618", "text": "import read_Puzzle\nimport copy\nfrom collections import deque\nimport math\nimport time\nfrom random import shuffle\n\nglobal counter\ncounter = 0\nglobal checkcounter\ncheckcounter = 0\n\n\n\ndef freeFlowDumb(puzzle,rows,columns,left, values,sourceA,sourceB,idx,frontier):\n if not frontier:\n return 0\n global counter\n pos_now = frontier.pop()\n counter += 1\n value = values[idx]\n\n puzzle_temp = copy.deepcopy(puzzle)\n if pos_now == sourceB[idx]:\n if check1(puzzle_temp,idx,values,sourceA[idx],sourceB[idx],rows,columns):\n if left == 1:\n print('done')\n print(counter)\n return puzzle\n pos_now = sourceA[idx+1]\n\n result = freeFlowDumb(puzzle_temp,rows,columns,left-1,values,sourceA,sourceB,idx+1,[pos_now])\n if result != 0 :\n return result\n return 0\n else:\n return 0\n else:\n if puzzle_temp[pos_now[0]][pos_now[1]] == '_':\n puzzle_temp[pos_now[0]][pos_now[1]] = value\n neighbors = getNeighbor(pos_now,rows,columns)\n shuffle(neighbors)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle_temp[neighbor[0]][neighbor[1]] == '_' or neighbor == sourceB[idx]:\n frontier.append(neighbor)\n result = freeFlowDumb(puzzle_temp,rows,columns,left,values,sourceA,sourceB,idx,frontier)\n if result != 0:\n return result\n return 0\n\n\ndef 
freeFlowSmart(puzzle,rows,columns,left, values,sourceA,sourceB,idx,frontier):\n if not frontier:\n return 0\n pos_now = frontier.pop()\n value = values[idx]\n global counter\n counter += 1\n global checkcounter\n\n puzzle_temp = copy.deepcopy(puzzle)\n if pos_now == sourceB[idx]:\n #print(checkcounter)\n\n if check1(puzzle_temp,idx,values,sourceA[idx],sourceB[idx],rows,columns) :\n if left == 1:\n print('done')\n print(counter)\n return puzzle\n\n pos_now = sourceA[idx+1]\n\n result = freeFlowSmart(puzzle_temp,rows,columns,left-1,values,sourceA,sourceB,idx+1,[pos_now])\n if result != 0 :\n return result\n return 0\n else:\n return 0\n else:\n if puzzle_temp[pos_now[0]][pos_now[1]] == '_' or pos_now == sourceA[idx]:\n puzzle_temp[pos_now[0]][pos_now[1]] = value\n check_move = check2(puzzle_temp,idx,values,sourceA,sourceB,rows,columns)\n if not check_move:\n puzzle_temp[pos_now[0]][pos_now[1]] = '_'\n else:\n neighbors = getNeighbor(pos_now,rows,columns)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle_temp[neighbor[0]][neighbor[1]] == '_' or neighbor == sourceB[idx]:\n frontier.append(neighbor)\n result = freeFlowSmart(puzzle_temp,rows,columns,left,values,sourceA,sourceB,idx,frontier)\n if result != 0:\n return result\n return 0\n\ndef freeFlowEcSmart(puzzle,rows,columns,left, values,sourceA,sourceB,idx,frontier):\n # this function is for extra credit\n if not frontier:\n return 0\n pos_now = frontier.pop()\n #print(puzzle)\n value = values[idx]\n global counter\n counter += 1\n global checkcounter\n #print(puzzle)\n #print(frontier)\n #puzzle_temp = copy.deepcopy(puzzle)\n\n if pos_now == sourceB[idx]:\n print(checkcounter)\n check_move3 = check3(puzzle, rows, columns, value)\n #check_move4 = check4(puzzle, rows, columns, sourceA, sourceB,idx,values)\n if check1(puzzle,idx,values,sourceA[idx],sourceB[idx],rows,columns) and check_move3 :\n if left == 1:\n print('done')\n print(counter)\n return puzzle\n #ordered_values,ordered_A,ordered_B = ordering (idx,values,sourceA,sourceB,puzzle,rows,columns)\n # pos_now = ordered_A[idx+1]\n # result = freeFlowEcSmart(puzzle,rows,columns,left-1,ordered_values,ordered_A,ordered_B,idx+1,[pos_now])\n pos_now = sourceA[idx + 1]\n result = freeFlowEcSmart(puzzle, rows, columns, left - 1, values, sourceA, sourceB, idx + 1,[pos_now])\n if result != 0 :\n return result\n return 0\n else:\n return 0\n else:\n if puzzle[pos_now[0]][pos_now[1]] == '_' or pos_now == sourceA[idx]:\n puzzle[pos_now[0]][pos_now[1]] = value\n\n check_move5 = check5(puzzle, idx, values, sourceA[idx], sourceB[idx], rows, columns, pos_now)\n\n #print(check_move3)\n if (not check_move5) :\n puzzle[pos_now[0]][pos_now[1]] = '_'\n elif not (check2(puzzle,idx,values,sourceA,sourceB,rows,columns)):\n puzzle[pos_now[0]][pos_now[1]] = '_'\n\n\n elif not (check4(puzzle, rows, columns, sourceA, sourceB, idx, values, pos_now)):\n puzzle[pos_now[0]][pos_now[1]] = '_'\n else:\n neighbors = getNeighbor_newnew(pos_now,rows,columns,idx,sourceB)\n # neighbors = getNeighbor(pos_now, rows, columns)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle[neighbor[0]][neighbor[1]] == '_' or neighbor == sourceB[idx]:\n frontier.append(neighbor)\n result = freeFlowEcSmart(puzzle,rows,columns,left,values,sourceA,sourceB,idx,frontier)\n if result != 0:\n return result\n if pos_now != sourceA[idx]:\n puzzle[pos_now[0]][pos_now[1]] = '_'\n return 0\n\n\n\n\ndef check1(puzzle,idx,values,sourceA,sourceB,rows,columns): # check basic 
constraints\n value = values[idx]\n # first check if the sourceA has only one same color source\n neighbors = getNeighbor(sourceA,rows,columns)\n count = 0\n for i in range(len(neighbors)):\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 1:\n return False\n\n # first check if the sourceB has only one same color source\n neighbors = getNeighbor(sourceB,rows,columns)\n count = 0\n for i in range(len(neighbors)):\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 1:\n return False\n\n # check non-source node\n for row in range(rows):\n for column in range(columns):\n if [row,column] != sourceA and [row,column] != sourceB:\n if puzzle[row][column] == value:\n count = 0\n neighbors = getNeighbor([row,column],rows,columns)\n for i in range(len(neighbors)):\n\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 2:\n return False\n return True\n\ndef check2(puzzle,idx,values,sourceA,sourceB,rows,columns):\n # check if there still are paths for other colors\n global checkcounter\n checkcounter += 1\n #print(checkcounter)\n #puzzle_temp = copy.deepcopy(puzzle)\n for i in range(idx+1,len(values)):\n A = sourceA[i]\n B = sourceB[i]\n value = values[i]\n # using BFS to determine whether there is still path between A and B\n frontier = deque([A])\n if not pathAvailable(puzzle,frontier,B,rows,columns,value):\n return False\n\n return True\n\ndef check3(puzzle,rows,columns,value):\n # check if the move creates dead end\n\n for i in range(rows):\n for j in range(columns):\n node_now = puzzle[i][j]\n if node_now == \"_\":\n neighbors = getNeighbor([i,j],rows,columns)\n count = 0\n for k in range(len(neighbors)):\n neighborx = neighbors[k][0]\n neighbory = neighbors[k][1]\n if puzzle[neighborx][neighbory] == value:\n count += 1\n if count >= len(neighbors) - 1:\n #print(i,j)\n #print(puzzle)\n return False\n return True\n\ndef check4(puzzle,rows,columns,sourceA,sourceB,idx,values,pos):\n # check if there is isolated blank area\n modified = []\n sourceA_temp = copy.deepcopy(sourceA)\n sourceA_temp[idx] = pos\n for i in range (rows):\n for j in range(columns):\n node_now = puzzle[i][j]\n # start from a blank\n if node_now == \"_\":\n # using BFS to determine whether there is still path to source node\n A = [i,j]\n frontier = deque([A])\n indicator = False\n block_sourceA_neighbor = []\n block_sourceB_neighbor = []\n block_neighbor = []\n while not (not frontier):\n\n A = frontier.popleft()\n #print(A)\n x = A[0]\n y = A[1]\n puzzle[x][y] = 'X'\n modified.append([x, y])\n # print(puzzle)\n neighbors = getNeighbor(A, rows, columns)\n for m in range(len(neighbors)):\n neighbor = neighbors[m]\n\n if neighbor in sourceA_temp[idx:]:\n block_sourceA_neighbor.append(sourceA_temp.index(neighbor))\n\n elif neighbor in sourceB[idx:]:\n block_sourceB_neighbor.append(sourceB.index(neighbor))\n # if neighbor in sourceA[idx:] or neighbor in sourceB[idx:]:\n # indicator = True\n elif puzzle[neighbor[0]][neighbor[1]] == '_':\n frontier.append(neighbor)\n # if not indicator:\n indicator = bool(set(block_sourceA_neighbor) & set(block_sourceB_neighbor))\n #print(indicator)\n #print(puzzle)\n if not indicator:\n #print(puzzle)\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n #print(False)\n return False\n\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n #print(True)\n return True\n\ndef check5 
(puzzle,idx,values,sourceA,sourceB,rows,columns,pos):\n # check if the move is valid (each non-source cell can have two neighbors with same color)\n value = values[idx]\n for row in range(rows):\n for column in range(columns):\n if [row,column] != sourceA and [row,column] != sourceB and [row,column] != pos:\n if puzzle[row][column] == value:\n count = 0\n neighbors = getNeighbor([row,column],rows,columns)\n for i in range(len(neighbors)):\n\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 2:\n return False\n return True\n\ndef check6(puzzle,rows,columns,pos):\n # clever way to consider if the move is valid\n # not used\n X = rows\n Y = columns\n getcircle = lambda x, y: [[x2, y2] for x2 in range(x - 1, x + 2)\n for y2 in range(y - 1, y + 2)\n if (-1 < x <= X and\n -1 < y <= Y and\n (x != x2 or y != y2) and\n (0 <= x2 <= X) and\n (0 <= y2 <= Y))]\n circle = getcircle(pos[0],pos[1])\n print(circle)\n modified = []\n for i in range(len(circle)):\n node_now = puzzle[circle[i][0]][circle[i][1]]\n if node_now == \"_\":\n # using BFS to determine whether there is still path to source node\n A = [circle[i][0],circle[i][1]]\n frontier = deque([A])\n while not (not frontier):\n A = frontier.popleft()\n # print(A)\n x = A[0]\n y = A[1]\n puzzle[x][y] = 'X'\n modified.append([x, y])\n # print(puzzle)\n neighbors = getNeighbor(A, rows, columns)\n for m in range(len(neighbors)):\n neighbor = neighbors[m]\n if neighbor in circle:\n if puzzle[neighbor[0]][neighbor[1]] == '_':\n frontier.append(neighbor)\n # if not indicator:\n if not frontier:\n break\n for i in range(len(circle)):\n node_now = puzzle[circle[i][0]][circle[i][1]]\n if node_now == \"_\":\n\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n # print(False)\n return False\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n # print(False)\n return True\n\ndef pathAvailable(puzzle,frontier,B,rows,columns,value):\n modified = []\n while not (not frontier):\n A = frontier.popleft()\n if A == B :\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n return True\n x = A[0]\n y = A[1]\n if puzzle[x][y] == '_':\n puzzle[x][y] = 'X'\n modified.append([x,y])\n #print(puzzle)\n neighbors = getNeighbor(A,rows,columns)\n #print(neighbors)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle[neighbor[0]][neighbor[1]] == '_' or neighbor == B:\n frontier.append(neighbor)\n\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n return False\n\ndef getNeighbor(pos,rows,columns):\n neighbors = []\n i_lower = max(0,pos[0] - 1) # up\n i_upper = min(rows-1,pos[0]+1) # down\n j_lower = max(0,pos[1] - 1) # left\n j_upper = min(columns-1,pos[1] + 1) #right\n\n\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n return neighbors\n\ndef getNeighbor_new(pos,rows,columns):\n neighbors = []\n i_lower = max(0,pos[0] - 1)\n i_upper = min(columns-1,pos[0]+1)\n j_lower = max(0,pos[1] - 1)\n j_upper = min(rows-1,pos[1] + 1)\n #print(i_lower,i_upper,j_upper,j_lower)\n if (0 == pos[1]) or (rows-1 == pos[1]):\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n 
if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n else:\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n return neighbors\n\n\ndef getNeighbor_ordered(pos,rows,columns):\n neighbors = []\n i_lower = max(0,pos[0] - 1)\n i_upper = min(columns-1,pos[0]+1)\n j_lower = max(0,pos[1] - 1)\n j_upper = min(rows-1,pos[1] + 1)\n #print(i_lower,i_upper,j_upper,j_lower)\n\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n # order them with respect to the distance to the wall\n distance = []\n neighborsNew = []\n for i in range(len(neighbors)):\n A = neighbors[i]\n dist = abs(A[0] - 0) * abs(A[0] - (rows - 1)) * abs(A[1] - 0) * abs(A[1] - (columns - 1))\n distance.append([dist, i])\n distance = (sorted(distance, key = lambda length:length[0]))\n for i in range(len(distance)):\n idx = distance[i][1]\n neighborsNew.append(neighbors[idx])\n return(neighborsNew)\n\ndef getNeighbor_newnew(pos,rows,columns,idx,sourceB):\n # if the neighbors contain the sourcenode, add it first\n neighbors = []\n i_lower = max(0,pos[0] - 1) # up\n i_upper = min(rows-1,pos[0]+1) # down\n j_lower = max(0,pos[1] - 1) # left\n j_upper = min(columns-1,pos[1] + 1) #right\n\n\n neighborsNew = []\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n for i in range(len(neighbors)):\n if neighbors[i] == sourceB[idx]:\n neighbors[0],neighbors[i] = neighbors[i],neighbors[0]\n return(neighbors)\n\ndef heuristic(A,B,rows,columns):\n distance = []\n for i in range(len(A)):\n dist = abs(A[i][0] - 0) * abs(A[i][0] - (rows-1)) * abs(A[i][1] - 0) * abs(A[i][1]- (columns - 1))\n distance.append([dist,i])\n return(sorted(distance, key = lambda length:length[0]))\n\ndef ordering (idx,values,sourceA,sourceB,puzzle,rows,columns):\n order = []\n for i in range(idx+1,len(values)):\n A = sourceA[i]\n B = sourceB[i]\n neighborA = getNeighbor(A,rows,columns)\n neighborB = getNeighbor(B,rows,columns)\n count = 0\n for j in range(len(neighborA)):\n neighborAx = neighborA[j][0]\n neighborAy = neighborA[j][1]\n if puzzle[neighborAx][neighborAy] == \"_\":\n count += 1\n for j in range(len(neighborB)):\n neighborBx = neighborB[j][0]\n neighborBy = neighborB[j][1]\n if puzzle[neighborBx][neighborBy] == \"_\":\n count += 1\n order.append([count,i])\n order = sorted(order, key = lambda length:length[0])\n ordered_values = []\n ordered_sourceA = []\n ordered_sourceB = []\n for i in range(idx+1):\n ordered_values.append(values[i])\n ordered_sourceA.append(sourceA[i])\n ordered_sourceB.append(sourceB[i])\n for i in range(idx+1,len(values)):\n index = order[i - idx - 1][1]\n ordered_values.append(values[index])\n ordered_sourceA.append(sourceA[index])\n ordered_sourceB.append(sourceB[index])\n return ordered_values,ordered_sourceA,ordered_sourceB\n\n\n\ndef main():\n [puzzle, rows, columns, left, values,sourceA, sourceB] = read_Puzzle.generatePuzzle()\n print(puzzle)\n print(values)\n center = 
[math.floor(rows/2),math.floor(columns/2)]\n #order = heuristic(sourceA,sourceB,rows,columns)\n #order = order[::-1]\n idx = 0\n # for i in range(20000):\n # check2(puzzle,idx,values,sourceA,sourceB,rows,columns)\n # print(\"done\")\n Fordumb = []\n for i in range(len(values)):\n Fordumb.append([values[i],sourceA[i],sourceB[i]])\n shuffle(Fordumb)\n values_for_dumb = []\n sourceA_for_dumb = []\n sourceB_for_dumb = []\n for i in range(len(values)):\n values_for_dumb.append(Fordumb[i][0])\n sourceA_for_dumb.append(Fordumb[i][1])\n sourceB_for_dumb.append((Fordumb[i][2]))\n #order = [[0,0],[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]]\n #pos_now = sourceA[order[idx][1]]\n pos_now = sourceA[0]\n # global count\n # count = 0\n #result = freeFlowSmart(puzzle,rows,columns,left, values,sourceA,sourceB,idx, [pos_now],order)\n #ordered_values,ordered_A,ordered_B = ordering(-1, values, sourceA, sourceB, puzzle, rows, columns)\n #pos_now = ordered_A[idx]\n start = time.time()\n result = freeFlowSmart(puzzle, rows, columns, left, values, sourceA, sourceB, idx, [pos_now])\n #result = freeFlowEcSmart(puzzle, rows, columns, left, values, sourceA, sourceB, idx, [pos_now])\n #result = freeFlowDumb(puzzle, rows, columns, left, values_for_dumb, sourceA_for_dumb, sourceB_for_dumb, idx, [sourceA_for_dumb[idx]])\n end = time.time()\n print(end-start)\n for i in range(rows):\n print(result[i])\n\nif __name__ == \"__main__\":\n main()", "sub_path": "Assignment2_Search_CSP/part1.py", "file_name": "part1.py", "file_ext": "py", "file_size_in_byte": 20961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "copy.deepcopy", "line_number": 23, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 42, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 211, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 240, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 249, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 333, "usage_type": "call"}, {"api_name": "read_Puzzle.generatePuzzle", "line_number": 540, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 543, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 553, "usage_type": "call"}, {"api_name": "time.time", "line_number": 569, "usage_type": "call"}, {"api_name": "time.time", "line_number": 573, "usage_type": "call"}]} +{"seq_id": "435596602", "text": "from rdflib import Graph, BNode, Literal, URIRef, RDFS, RDF, plugin\nfrom rdflib.store import Store\nimport os\n\ndef test1():\n store = plugin.get('SQLAlchemy', Store)(\n identifier=URIRef(\"rdflib_test\"),\n configuration=Literal(\"sqlite:///%(here)s/development.sqlite\" % {\"here\": os.getcwd()}))\n g = Graph(store)\n statementId = BNode()\n print(len(g))\n g.add((statementId, RDF.type, RDF.Statement))\n g.add((statementId, RDF.subject, URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))\n g.add((statementId, RDF.predicate, RDFS.label))\n g.add((statementId, RDF.object, Literal(\"Conjunctive Graph\")))\n print(len(g))\n for s, p, o in g:\n print(type(s))\n\n for s, p, o in g.triples((None, RDF.object, None)):\n print(o)\n\n g.remove((statementId, RDF.type, RDF.Statement))\n print(len(g))\n os.unlink(\"%(here)s/development.sqlite\" % {\"here\": os.getcwd()})\n\n", "sub_path": "test/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 
911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rdflib.plugin.get", "line_number": 6, "usage_type": "call"}, {"api_name": "rdflib.store.Store", "line_number": 6, "usage_type": "argument"}, {"api_name": "rdflib.plugin", "line_number": 6, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 7, "usage_type": "call"}, {"api_name": "rdflib.Literal", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "rdflib.Graph", "line_number": 9, "usage_type": "call"}, {"api_name": "rdflib.BNode", "line_number": 10, "usage_type": "call"}, {"api_name": "rdflib.RDF.type", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 12, "usage_type": "name"}, {"api_name": "rdflib.RDF.Statement", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rdflib.RDF.subject", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 13, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 13, "usage_type": "call"}, {"api_name": "rdflib.RDF.predicate", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 14, "usage_type": "name"}, {"api_name": "rdflib.RDFS.label", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rdflib.RDFS", "line_number": 14, "usage_type": "name"}, {"api_name": "rdflib.RDF.object", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 15, "usage_type": "name"}, {"api_name": "rdflib.Literal", "line_number": 15, "usage_type": "call"}, {"api_name": "rdflib.RDF.object", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 20, "usage_type": "name"}, {"api_name": "rdflib.RDF.type", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 23, "usage_type": "name"}, {"api_name": "rdflib.RDF.Statement", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "21765081", "text": "from django.shortcuts import render\nfrom message.models import Message\nfrom django.http import Http404\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\n@login_required\ndef log_audit(request):\n    '''\n    Audit log\n    '''\n    if request.user.is_superuser:\n        logs = Message.objects.all()[:300]\n\n        if request.method == 'GET':\n            if 'aid' in request.GET:\n                aid = request.get_full_path().split('=')[1]\n                log_detail = Message.objects.filter(id=aid)\n                data = {\n                    'log_detail': log_detail,\n                    'page_name': 'Log detail'\n                }\n                return render(request, 'message/log_audit_detail.html',data)\n        data = {\n            'all_logs':logs,\n            'page_name':'Audit log'\n        }\n\n        return render(request, 'message/log_audit.html', data)\n    else:\n        raise Http404\n", "sub_path": "message/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "message.models.Message.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "message.models.Message.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "message.models.Message", "line_number": 14, "usage_type": "name"}, {"api_name": "message.models.Message.objects.filter", "line_number": 19, "usage_type": "call"}, 
{"api_name": "message.models.Message.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "message.models.Message", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "395634859", "text": "#!/usr/bin/env python3\n# encoding:utf-8\n\nimport requests\nfrom pyquery import PyQuery as pq\nfrom fake_useragent import UserAgent\nfrom urllib.parse import quote\n\nfrom database import Database\n\n\nclass Zongheng():\n\n def __init__(self):\n self.db = Database()\n\n def get_book_information(self, book):\n name, source_id, *_, search = book\n book, author, category = [{}, {}, {}]\n print('search {} at {}'.format(name, search))\n # http://search.zongheng.com/search/all/永夜君王/1.html\n url = search + 'all/' + quote(name) + '/1.html'\n headers = {'Referer': search, 'User-Agent': UserAgent().random}\n result = pq(requests.get(url, headers=headers).text)('.search_text').eq(0)\n if pq(result)('a').eq(0).text().replace(' ','') == name:\n print('book {} found'.format(name))\n book['name'] = name\n book['source_id'] = source_id\n book['book_link'] = pq(result)('a').eq(0).attr('href')\n book['toc_link'] = pq(result)('.search_oprate')('.a_un').eq(1).attr('href')\n author['name'] = pq(result)('a').eq(1).text()\n author['link'] = pq(result)('a').eq(1).attr('href')\n category['name'] = pq(result)('a').eq(2).text()\n print(book, author, category)\n else:\n print(\"book {} not found\".format(name))\n return book, author, category\n\n def get_chapters(self, book):\n book_id, name, toc_link, source_id, source_name = book\n chapters = []\n print('get chapter list for {}'.format(name))\n headers = {'User-Agent': UserAgent().random}\n results = pq(requests.get(toc_link, headers=headers).text)('#chapterListPanel')('.chapterBean')\n for r in results:\n chapter, chapter_list = [{}, {}]\n chapter['name'] = pq(r)('td').attr('chaptername')\n chapter['book_id'] = book_id\n chapter['is_new'] = True\n chapter['update_time'] = pq(r)('td').attr('updatetime')\n chapter['word_num'] = pq(r)('td').attr('wordnum')\n chapter_list['source_id'] = source_id\n chapter_list['link'] = pq(r)('a').attr('href')\n chapters.append(chapter)\n\n def update_chapters(self, book):\n id, name, link, source = book", "sub_path": "cashew/lidl/zongheng.py", "file_name": "zongheng.py", "file_ext": "py", "file_size_in_byte": 2292, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "database.Database", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 22, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 23, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 25, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 29, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 30, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 31, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 32, "usage_type": "call"}, {"api_name": 
"pyquery.PyQuery", "line_number": 33, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 43, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 47, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 50, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 51, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "562241773", "text": "import pygame\r\nimport random as r\r\nimport math\r\nfrom pygame import mixer\r\n\r\n# Initialization of package\r\npygame.init()\r\n\r\n# screen creation\r\nscreen = pygame.display.set_mode((800, 600))\r\n\r\n# Title and icon\r\npygame.display.set_caption(\"Space Invaders\")\r\nicon = pygame.image.load(\"space.png\")\r\npygame.display.set_icon(icon)\r\n\r\n# Background music\r\nmixer.music.load('background.wav')\r\nmixer.music.play(-1)\r\n\r\n# Background\r\nbackground = pygame.image.load('background.png')\r\n\r\n# Player\r\nplayerImg = pygame.image.load(\"player.png\")\r\nplayerX = 370\r\nplayerY = 480\r\nplayerX_change = 0\r\n\r\n# Enemy\r\nenemyImg = list()\r\nenemyX = list()\r\nenemyY = list()\r\nenemyX_change = list()\r\nenemyY_change = list()\r\nnum_of_enemies = 6\r\nfor i in range(num_of_enemies):\r\n enemyImg.append(pygame.image.load(\"enemy.png\"))\r\n enemyX.append(r.randint(0, 735))\r\n enemyY.append(r.randint(50, 150))\r\n enemyX_change.append(6)\r\n enemyY_change.append(55)\r\n\r\n# Bullet\r\nbulletImg = pygame.image.load(\"bullet.png\")\r\nbulletX = 0\r\nbulletY = 480\r\nbulletX_change = 0\r\nbulletY_change = 8\r\n\"\"\" Ready - You can't see bullet on the screen\r\n Fire -- The bullet is currently moving\r\n\"\"\"\r\nbullet_state = \"ready\"\r\n\r\n# Score\r\nscore_value = 0\r\nfont = pygame.font.Font('freesansbold.ttf', 32)\r\n\r\ntextX = 10\r\ntextY = 10\r\n\r\n# Game over text\r\nover_font = pygame.font.Font('freesansbold.ttf', 64)\r\n\r\n\r\ndef show_score(x, y):\r\n score = font.render(\"Score: \" + str(score_value), True, (255, 255, 255))\r\n screen.blit(score, (x, y))\r\n\r\n\r\ndef game_over_text():\r\n over_text = over_font.render('GAME OVER', True, (255, 255, 255))\r\n screen.blit(over_text, (200, 250))\r\n\r\n\r\ndef player(x, y):\r\n screen.blit(playerImg, (x, y))\r\n\r\n\r\ndef enemy(x, y, i):\r\n screen.blit(enemyImg[i], (x, y))\r\n\r\n\r\ndef fire_bullet(x, y):\r\n global bullet_state\r\n bullet_state = \"fire\"\r\n screen.blit(bulletImg, (x + 16, y + 10))\r\n\r\n\r\ndef is_collision(ex, ey, bx, by):\r\n distance = math.sqrt((math.pow(ex - bx, 2)) + (math.pow(ey - by, 2)))\r\n # print(distance)\r\n if distance < 27:\r\n return True\r\n return False\r\n\r\n\r\n# Game Loop\r\nrunning = True\r\n\r\nwhile running:\r\n # RGB - red, green, blue\r\n screen.fill((255, 255, 255))\r\n screen.blit(background, (0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n # to check for keystroke whether it's right or left\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n playerX_change = -5\r\n if event.key == pygame.K_RIGHT:\r\n playerX_change = 5\r\n if event.key == pygame.K_SPACE:\r\n if bullet_state is \"ready\":\r\n bullet_sound = mixer.Sound('laser.wav')\r\n bullet_sound.play()\r\n bulletX = playerX\r\n fire_bullet(bulletX, bulletY)\r\n\r\n if event.type == pygame.KEYUP:\r\n if 
event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n playerX_change = 0\r\n\r\n playerX += playerX_change\r\n\r\n if playerX <= 0:\r\n playerX = 0\r\n elif playerX > 736:\r\n playerX = 736\r\n\r\n for i in range(num_of_enemies):\r\n\r\n if enemyY[i] >= 350:\r\n for j in range(num_of_enemies):\r\n enemyY[j] = 1000\r\n game_over_text()\r\n break\r\n enemyX[i] += enemyX_change[i]\r\n if enemyX[i] <= 0:\r\n enemyX_change[i] = 2\r\n enemyY[i] += enemyY_change[i]\r\n elif enemyX[i] >= 736:\r\n enemyX_change[i] = -2\r\n enemyY[i] += enemyY_change[i]\r\n\r\n # Collision\r\n collision = is_collision(enemyX[i], enemyY[i], bulletX, bulletY)\r\n if collision:\r\n bulletY = 480\r\n bullet_state = \"ready\"\r\n score_value += 10\r\n enemyX[i] = r.randint(0, 735)\r\n enemyY[i] = r.randint(50, 150)\r\n\r\n explosion_sound = mixer.Sound('explosion.wav')\r\n explosion_sound.play()\r\n enemy(enemyX[i], enemyY[i], i)\r\n # Bullet movement\r\n\r\n if bulletY <= 0:\r\n bulletY = 480\r\n bullet_state = \"ready\"\r\n\r\n if bullet_state is \"fire\":\r\n fire_bullet(bulletX, bulletY)\r\n bulletY -= bulletY_change\r\n\r\n player(playerX, playerY)\r\n show_score(textX, textY)\r\n pygame.display.update()\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 18, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 19, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 63, 
"usage_type": "call"}, {"api_name": "pygame.font", "line_number": 63, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 91, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 117, "usage_type": "name"}, {"api_name": "pygame.KEYUP", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 123, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 154, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 155, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 157, "usage_type": "name"}, {"api_name": "pygame.display.update", "line_number": 172, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "337907518", "text": "import numpy\nimport talib\nfrom logic import MarketTrend\nfrom logic import Indicator, validate_datapoint\nfrom logic.candle import Candle\n\n\nclass TakeProfit(Indicator):\n\n def __init__(self, atr_period_length=7):\n super(TakeProfit, self).__init__()\n self.period = atr_period_length\n self._high = []\n self._low = []\n self._close = []\n self.position_type = MarketTrend.ENTER_LONG\n self.current_takeprofit_price = 0.0\n self.state = MarketTrend.NO_STOP\n\n def GetState(self):\n return self.state\n\n def seen_enough_data(self):\n return self.period <= len(self._high)\n\n def AmountOfDataStillMissing(self):\n return max(0, self.period - len(self._high))\n\n def Tickerupdate(self, datapoint):\n if not validate_datapoint(datapoint):\n return\n\n # Check if it is time to do a stop loss trade\n if (self.current_takeprofit_price > 0.0):\n if (self.position_type == MarketTrend.ENTER_LONG):\n if (datapoint[\"value\"] > self.current_takeprofit_price):\n # Should sell Long position\n self.state = MarketTrend.STOP_LONG\n self.current_takeprofit_price = 0.0\n elif (self.position_type == MarketTrend.ENTER_SHORT):\n if (datapoint[\"value\"] < self.current_takeprofit_price):\n # Should buy back short position\n self.state = MarketTrend.STOP_SHORT\n self.current_takeprofit_price = 0.0\n\n def update(self, datapoint):\n\n if not isinstance(datapoint, Candle):\n self.Tickerupdate(datapoint)\n return\n\n self._high.append(datapoint.high)\n self._low.append(datapoint.low)\n self._close.append(datapoint.close)\n\n if (len(self._high) > self.period):\n self._close.pop(0)\n self._low.pop(0)\n self._high.pop(0)\n\n def SetTakeProfit(self, price, position_type=MarketTrend.ENTER_LONG):\n if (position_type != MarketTrend.ENTER_LONG and\n position_type != MarketTrend.ENTER_SHORT):\n return\n if (price <= 0.0):\n return\n self.position_type = position_type\n self.current_takeprofit_price = 
price\n self.state = MarketTrend.NO_STOP\n\n def GetPrice(self, position_type=MarketTrend.ENTER_LONG):\n\n if (not self.seen_enough_data()):\n return numpy.nan\n\n high = numpy.array(self._high, dtype=float)\n low = numpy.array(self._low, dtype=float)\n close = numpy.array(self._close, dtype=float)\n ATR = talib.ATR(high, low, close, timeperiod=self.period - 1)[-1]\n takeprofit_price = self._close[-1]\n\n if (position_type == MarketTrend.ENTER_LONG):\n takeprofit_price += 1.0 * ATR\n elif (position_type == MarketTrend.ENTER_SHORT):\n takeprofit_price -= 1.0 * ATR\n else:\n takeprofit_price = numpy.nan\n\n return takeprofit_price\n\n def CancelTakeProfit(self):\n self.state = MarketTrend.NO_STOP\n self.current_takeprofit_price = 0.0\n\n def IsSet(self):\n return self.current_takeprofit_price != 0.0\n", "sub_path": "logic/takeprofit.py", "file_name": "takeprofit.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logic.Indicator", "line_number": 8, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 16, "usage_type": "name"}, {"api_name": "logic.MarketTrend.NO_STOP", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 18, "usage_type": "name"}, {"api_name": "logic.validate_datapoint", "line_number": 30, "usage_type": "call"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 35, "usage_type": "name"}, {"api_name": "logic.MarketTrend.STOP_LONG", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 38, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_SHORT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 40, "usage_type": "name"}, {"api_name": "logic.MarketTrend.STOP_SHORT", "line_number": 43, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 43, "usage_type": "name"}, {"api_name": "logic.candle.Candle", "line_number": 48, "usage_type": "argument"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 61, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 62, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 62, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_SHORT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 63, "usage_type": "name"}, {"api_name": "logic.MarketTrend.NO_STOP", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 69, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 71, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "talib.ATR", "line_number": 79, "usage_type": "call"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 82, "usage_type": 
"attribute"}, {"api_name": "logic.MarketTrend", "line_number": 82, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_SHORT", "line_number": 84, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 87, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend.NO_STOP", "line_number": 92, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "651752315", "text": "from PySide2.QtWidgets import QTextEdit\nfrom PySide2.QtGui import QSyntaxHighlighter, QColor, QTextCharFormat#, QFont\nfrom PySide2.QtCore import QRegExp\n\nfrom shiboken2 import wrapInstance\nfrom maya.OpenMayaUI import MQtUtil \n\n\n# dependancies: tk_scriptEditorOutput.mel\n\nclass SH(QSyntaxHighlighter):\n\t'''\n\tSyntax Highlight class, used by all SK_*_Codes SHs\n\t:param parent: parent's widget\n\t'''\n\tdef __init__(self, parent):\n\t\tQSyntaxHighlighter.__init__(self, parent) #inherit\n\t\tself.parent = parent #define parent explicitly\n\t\t\n\tdef highlightBlock(self, text):\n\t\t# Derived from Qt function, used to apply color-syntaxing to text\n\t\t# :param text: text input\n\t\t\n\t\trules = [(QColor( 90, 90, 90), r\"^(//|#).+$\"), #grey 90, 90, 90\n\t\t\t\t (QColor(205, 200, 120), r\"^(//|#) Warning.+$\"), #yellow 205, 200, 120\n\t\t\t\t (QColor(165, 75, 75), r\"^(//|#).+Error.+$\"), #red 165, 75, 75\n\t\t\t\t (QColor(115, 215, 150), r\"^(//|#).+Result.+$\")] #green 115, 215, 150\n\t\t# loop through rules\n\t\tfor color, pattern in rules:\n\t\t\tkeyword = QTextCharFormat()\n\t\t\tkeyword.setForeground(color)\n\t\t\t# get regexp pattern\n\t\t\texpression = QRegExp(pattern)\n\t\t\tindex = expression.indexIn(text)\n\t\t\t# loop until all matches are done\n\t\t\twhile index >= 0:\n\t\t\t\tlength = expression.matchedLength()\n\t\t\t\t# format text with current formatting\n\t\t\t\tself.setFormat(index, length, keyword)\n\t\t\t\tindex = expression.indexIn(text, index + length)\n\t\tself.setCurrentBlockState(0)\n\n\ndef wrap():\n\ti=1\n\twhile i:\n\t\ttry:\n\t\t\tse_edit = wrapInstance(long(MQtUtil.findControl('cmdScrollFieldReporter%i' %i)), QTextEdit)\n\t\t\tbreak\n\t\texcept TypeError:\n\t\t\ti+=1\n\tsyntax_highlighter = SH(se_edit)\n\n\t#untested. send to $tk_cmdScrollFieldReporter explicitly. 
used in place of above code.\n\t# cmdScrollFieldReporter = \"$tk_cmdScrollFieldReporter\"\n\t# se_edit = wrapInstance(long(MQtUtil.findControl(cmdScrollFieldReporter)), QTextEdit)\n\t# syntax_highlighter = SH(se_edit)\n \n\n\n\t#unused from original script\n\t# # try:\n\t# # syntax_highlighter.deleteLater()\n\t# # except:\n\t# # pass", "sub_path": "maya/scriptEditorOutputTextHighlighting.py", "file_name": "scriptEditorOutputTextHighlighting.py", "file_ext": "py", "file_size_in_byte": 2041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PySide2.QtGui.QSyntaxHighlighter", "line_number": 11, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QSyntaxHighlighter.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QSyntaxHighlighter", "line_number": 17, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 24, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 25, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 26, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 27, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QTextCharFormat", "line_number": 30, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QRegExp", "line_number": 33, "usage_type": "call"}, {"api_name": "shiboken2.wrapInstance", "line_number": 48, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTextEdit", "line_number": 48, "usage_type": "argument"}, {"api_name": "maya.OpenMayaUI.MQtUtil.findControl", "line_number": 48, "usage_type": "call"}, {"api_name": "maya.OpenMayaUI.MQtUtil", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "187910194", "text": "#!/usr/bin/python3\n\nfrom sys import stdin\nfrom itertools import repeat\n\n\ndef fold(a, b):\n a.append((float('inf'),'inf'))\n b.append((float('inf'),'inf'))\n i = 0\n j = 0\n r = []\n for k in range(len(a)+len(b)-2):\n if (a[i][0] <= b[j][0]):\n r.append(a[i])\n i = i + 1\n elif (a[i][0] > b[j][0]):\n r.append(b[j])\n j = j + 1\n return r\n\ndef merge(decks):\n # SKRIV DIN KODE HER\n result = []\n for k in decks:\n result = (fold(result, k))\n a = []\n for t in result:\n a.append(t[1])\n return ''.join(a)\n \n \n\n\ndef main():\n # Read input.\n decks = []\n for line in stdin:\n (index, csv) = line.strip().split(':')\n deck = list(zip(map(int, csv.split(',')), repeat(index)))\n decks.append(deck)\n # Merge the decks and print result.\n print(merge(decks))\n\n\nif __name__ == \"__main__\":\n main()\n\n", "sub_path": "Oving3/kortstokker.py", "file_name": "kortstokker.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.stdin", "line_number": 38, "usage_type": "name"}, {"api_name": "itertools.repeat", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "248478467", "text": "import pygame\r\n\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Lift(Sprite):\r\n\r\n\t\"\"\"A class to manage bullets fired from the ship\"\"\"\r\n\r\n\tdef __init__(self, ai_settings, screen, nuke):\r\n\r\n\t\t\"\"\"Create a bullet object at the ship's current position.\"\"\"\r\n\r\n\t\tsuper(Lift, self).__init__()\r\n\r\n\t\tself.screen = screen\r\n\t\tself.image = pygame.image.load('images/turretlift.gif')\r\n\t\t# Create a bullet rect at (0, 0) and then set correct position.\r\n\r\n\t\tself.rect = pygame.Rect(0, 0, 100, 100)\r\n\r\n\t\tself.rect.x = 
200\r\n\t\tself.rect.y = (nuke.rect.y + nuke.rect.height)\r\n\r\n\r\n\t\t# Store the bullet's position as a decimal value.\r\n\r\n\t\t\r\n\r\n\t\tself.color = ai_settings.ground_color\r\n\r\n\t\r\n\r\n\t\t\r\n\r\n\tdef blitme(self, nuke):\r\n\r\n\t\t\"\"\"Draw the bullet to the screen.\"\"\"\r\n\r\n\t\tself.rect.y = (nuke.rect.y + nuke.rect.height)\r\n\t\tself.screen.blit(self.image, self.rect)\r\n\r\n\r\n", "sub_path": "Zombies/turretlift.py", "file_name": "turretlift.py", "file_ext": "py", "file_size_in_byte": 837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "574672433", "text": "# import pickle\n\n# boxes = pickle.load(open('/home/junjie/Code/faster-rcnn-gcn/output/res101/voc_2007_test/rec_bgul/detections.pkl', 'rb'))\n# re_class_boxes = pickle.load(open('/home/junjie/Code/faster-rcnn-gcn/output/res101/voc_2007_test/origin/detections.pkl', 'rb'))\n\n\n# pass\n\n\nimport json\ncoco = json.load(open('/home/junjie/Code/Datasets/COCO/annotations/instances_trainval2014.json', 'rb'))\n\nclass_name = [ 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor']\n\nclass_name = [ 'airplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'dining table', 'dog', 'horse',\n 'motorcycle', 'person', 'potted plant',\n 'sheep', 'couch', 'train', 'tv']\n\n\ni = 0\nindex = []\nfor cat in coco['categories']:\n if cat['name'] in class_name:\n print(cat['name'])\n i += 1\n index.append(cat['id'])\n \nprint(i)\n\nprint(index)\npass", "sub_path": "utils/read_output.py", "file_name": "read_output.py", "file_ext": "py", "file_size_in_byte": 1205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "382689673", "text": "from keras.models import load_model\nfrom keras.models import model_from_json\nfrom keras.applications import imagenet_utils\nfrom keras.preprocessing.image import img_to_array\nfrom sklearn.preprocessing import scale\nimport os, sys\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom keras.applications import imagenet_utils\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# dimensions of our images\nimg_width, img_height = 224, 224\ncatsPredicted, birdsPredicted, NothingPredicted = 0, 0, 0\nfolder =['/home/ioannis/Desktop/foto/cats',\n '/home/ioannis/Desktop/foto/birds',\n '/home/ioannis/Desktop/foto/nothing']\n\n\n\n#load the model we created\njson_file = open('/home/ioannis/Desktop/model_l2.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weight into model\nloaded_model.load_weights(\"/home/ioannis/Desktop/model_l2.h5\")\nprint(\"\\nModel successfully loaded from disk! 
\")\n\n#print model summary\nprint(loaded_model.summary())\n\n#Predict image\ndef predict_image(image):\n img =image.load_img(image, target_size=(224, 224))\n img = np.asarray(img,'float32')/255.0\n image = np.expand_dims(img, axis = 0)\n preds = loaded_model.predict(image)\n print(\"\\rImage is : \" + image)\n #pred_classes = np.argmax(preds)\n print(preds)\n print(pred_classes)\n\nfor subfolder in folder :\n catsPredicted, birdsPredicted, NothingPredicted = 0, 0, 0\n print(\"\\nPredicting\",subfolder , \"images\")\n for filename in os.listdir(subfolder):\n #print(filename)\n x = subfolder +'/'+filename\n img =image.load_img(x, target_size=(224, 224))\n img1 = np.asarray(img,'float32')/255.0\n image2 = np.expand_dims(img1, axis = 0)\n preds = loaded_model.predict(image2)\n birdsPredicted +=preds[0,0]\n catsPredicted += preds[0,1]\n NothingPredicted += preds[0,2]\n catmeans = catsPredicted /50\n birdsmean = birdsPredicted /50\n nothingmean = NothingPredicted /50\n allmeans = [round(catmeans, 2) , round(birdsmean, 2), round(nothingmean, 2)]\n print(' Cat | Bird | Nothing')\n print(allmeans)\n", "sub_path": "Neural_Networks/Deep_Learning/Part_2/load_model.py", "file_name": "load_model.py", "file_ext": "py", "file_size_in_byte": 2172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "keras.models.model_from_json", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 36, "usage_type": "argument"}, {"api_name": "numpy.asarray", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 39, "usage_type": "argument"}, {"api_name": "keras.preprocessing.image", "line_number": 40, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "508886699", "text": "# Import required libraries\nimport pandas as pd\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\n\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',\n\t\t\t\t\t\t\t\t\t\tstyle={'textAlign': 'center', 'color': '#503D36',\n\t\t\t\t\t\t\t\t\t\t\t 'font-size': 40}),\n\t\t\t\t\t\t\t\t# TASK 1: Add a dropdown list to enable Launch Site selection\n\t\t\t\t\t\t\t\t# The default select value is for ALL sites\n\t\t\t\t\t\t\t\tdcc.Dropdown(\n\t\t\t\t\t\t\t\t\tid='site-dropdown',\n\t\t\t\t\t\t\t\t\toptions=[\n\t\t\t\t\t\t\t\t\t\t{'label':'ALL', 'value':'ALL'},\n\t\t\t\t\t\t\t\t\t\t{'label':'CCAFS 
LC-40', 'value':'CCAFS LC-40'},\n\t\t\t\t\t\t\t\t\t\t{'label':'CCAFS SLC-40', 'value':'CCAFS SLC-40'},\n\t\t\t\t\t\t\t\t\t\t{'label':'KSC LC-39A', 'value':'KSC LC-39A'},\n\t\t\t\t\t\t\t\t\t\t{'label':'VAFB SLC-4E', 'value':'VAFB SLC-4E'}\n\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\tvalue='ALL',\n\t\t\t\t\t\t\t\t\tplaceholder='Select a Launch Site here',\n\t\t\t\t\t\t\t\t\tsearchable=True\n\t\t\t\t\t\t\t\t),\n\n\t\t\t\t\t\t\t\t# TASK 2: Add a pie chart to show the total successful launches count for all sites\n\t\t\t\t\t\t\t\t# If a specific launch site was selected, show the Success vs. Failed counts for the site\n\t\t\t\t\t\t\t\thtml.Div(dcc.Graph(id='success-pie-chart')),\n\t\t\t\t\t\t\t\thtml.Br(),\n\n\t\t\t\t\t\t\t\thtml.P(\"Payload range (Kg):\"),\n\t\t\t\t\t\t\t\t# TASK 3: Add a slider to select payload range\n\t\t\t\t\t\t\t\tdcc.RangeSlider(\n\t\t\t\t\t\t\t\t\tid='payload-slider',\n\t\t\t\t\t\t\t\t\tmin=0,\n\t\t\t\t\t\t\t\t\tmax=10000,\n\t\t\t\t\t\t\t\t\tstep=1000,\n\t\t\t\t\t\t\t\t\tmarks={\n\t\t\t\t\t\t\t\t\t\t0:'0',\n\t\t\t\t\t\t\t\t\t\t2500:'2500',\n\t\t\t\t\t\t\t\t\t\t5000:'5000',\n\t\t\t\t\t\t\t\t\t\t7500:'7500',\n\t\t\t\t\t\t\t\t\t\t10000:'10,000'\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tvalue=[min_payload, max_payload]\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\thtml.Br(),\n\n\t\t\t\t\t\t\t\t# TASK 4: Add a scatter chart to show the correlation between payload and launch success\n\t\t\t\t\t\t\t\thtml.Div(dcc.Graph(id='success-payload-scatter-chart')),\n\t\t\t\t\t\t\t\t])\n\n# TASK 2:\n# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output\n\n@app.callback( Output(component_id='success-pie-chart', component_property='figure'),\n\t\t\t Input(component_id='site-dropdown', component_property='value'))\n\ndef build_pie(site):\n\n\tdf_success = spacex_df[(spacex_df['class']==1)]\n\tdf_x = pd.DataFrame(df_success.groupby(['Launch Site'])['class'].value_counts())\n\tdf = spacex_df.copy()\n\n\tif site == 'ALL':\n\n\t\t#Success Counts for ALL Sites\n\t\tfig = px.pie(df_x,values=\"class\", names=\"class\", title='Success Counts for ALL Sites')\n\t\treturn fig\n\n\telse:\n\t\tddf = df[df['Launch Site']==site]\n\t\tddfg = pd.DataFrame(ddf.groupby(['Launch Site','class'])['class'].value_counts())\n\t\tfig = px.pie(ddfg, values='class', names='class', title='Succes count of '+site)\n\n\t\treturn fig\n\n# TASK 4:\n# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output\n\n@app.callback(Output(component_id='success-payload-scatter-chart', component_property='figure'),\n\t\t\t [Input(component_id='site-dropdown', component_property='value'),\n\t\t\t\tInput(component_id='payload-slider', component_property='value')])\n\ndef build_scatter(site,payload):\n\n\tlow,high = (payload[0], payload[1])\n\tdf = spacex_df #copy not needed\n\t# filter your weights out here since you need to filter it whether all sites or an individual site is selected\n\tfiltered_dfa = df[df['Payload Mass (kg)'].between(low,high)]\n\n\tif site == 'ALL':\n\t\tfig = px.scatter(filtered_dfa,x=\"Payload Mass (kg)\", y=\"class\", color=\"Booster Version Category\", title='Payload vs. Outcome for All Sites')\n\telse:\n\t\t# now we can use our filtered payload weights to filter further by site in our else statement\n\t\tfiltered_dfb = filtered_dfa[filtered_dfa['Launch Site'] == site]\n\t\tfig = px.scatter(filtered_dfb,x=\"Payload Mass (kg)\", y=\"class\", color=\"Booster Version Category\", title='Payload vs. 
Outcome for ' + site)\n\t# We return fig once at the end of the function, since fig is what we want either way:\n\t# the if/else above produces a different figure depending on the condition, under the same name.\n\treturn fig\n# Run the app\nif __name__ == '__main__':\n\tapp.run_server(debug=True, use_reloader=False, port=8051)\n", "sub_path": "spacex_dash_app.py", "file_name": "spacex_dash_app.py", "file_ext": "py", "file_size_in_byte": 4338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 15, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 23, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 39, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 39, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 40, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 42, "usage_type": "call"}, {"api_name": "dash_core_components.RangeSlider", "line_number": 44, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 61, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 73, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 79, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 79, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 85, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 85, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 67, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 68, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 104, "usage_type": "name"}, {"api_name": "plotly.express.scatter", "line_number": 108, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 108, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 92, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 93, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "201415950", "text": "from django.conf import settings\nfrom django.contrib.auth import (\n    REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout,\n)\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404, redirect, render, resolve_url\nfrom django.utils import translation\nfrom django.utils.http import is_safe_url\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.i18n import set_language as set_language_view\n\nfrom apps.organization.models import Location, 
Membership, Organization\nfrom apps.scheduling.models import Availability, BartenderAvailability, Event\n\nfrom .forms import RegisterForm\n\n\ndef _get_login_redirect_url(request, redirect_to):\n    # Ensure the user-originating redirection URL is safe.\n    if not is_safe_url(url=redirect_to, host=request.get_host()):\n        return resolve_url(settings.LOGIN_REDIRECT_URL)\n    return redirect_to\n\n\n@sensitive_post_parameters('password')\n@csrf_protect\ndef login(request):\n    redirect_to = request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME, ''))\n\n    if request.user.is_authenticated():\n        redirect_to = _get_login_redirect_url(request, redirect_to)\n        if redirect_to == request.path:\n            raise ValueError(\n                \"Redirection loop for authenticated user detected. Check that \"\n                \"your LOGIN_REDIRECT_URL doesn't point to a login page.\"\n            )\n        return redirect(redirect_to)\n    elif request.method == 'POST':\n        form = AuthenticationForm(request, data=request.POST)\n        if form.is_valid():\n            user = form.get_user()\n            auth_login(request, user)\n\n            # Set the user's primary organization\n            if hasattr(user, 'profile') and user.profile.current_organization:\n                request.session['organization_pk'] = user.profile.current_organization.pk\n\n            # Set the user's language\n            if hasattr(user, 'profile') and user.profile.current_language:\n                translation.activate(user.profile.current_language)\n                request.session[translation.LANGUAGE_SESSION_KEY] = user.profile.current_language\n\n            if not user.first_name or not user.email:\n                # User information is not complete, redirect to register page.\n                return redirect(register)\n\n            return redirect(_get_login_redirect_url(request, redirect_to))\n    else:\n        form = AuthenticationForm(request)\n\n    redirect_field_name = REDIRECT_FIELD_NAME\n\n    return render(request, 'general/login.html', locals())\n\n\ndef logout(request):\n    auth_logout(request)\n    return redirect(settings.LOGIN_URL)\n\n\n@login_required\ndef register(request):\n    if request.method == 'POST':\n        form = RegisterForm(request.POST, instance=request.user)\n        if form.is_valid():\n            form.save()\n            return redirect('schedule')\n    else:\n        form = RegisterForm(instance=request.user)\n\n    return render(request, 'general/register.html', locals())\n\n\n@login_required\ndef change_current_organization(request, organization):\n    org = get_object_or_404(Organization, slug=organization)\n    request.session['organization_pk'] = org.pk\n    request.user.profile.current_organization = org\n    request.user.profile.save()\n    return redirect(request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME, '')))\n\n\ndef change_current_language(request):\n    response = set_language_view(request)\n    if hasattr(request.user, 'profile'):\n        request.user.profile.current_language = request.session[translation.LANGUAGE_SESSION_KEY]\n        request.user.profile.save()\n    return response\n\n\ndef about(request):\n    count = {\n        'organizations': Organization.objects.count(),\n        'users': User.objects.count(),\n        'tenders': Membership.objects.values('user_id').distinct().count(),\n        'locations': Location.objects.count(),\n        'public_locations': Location.objects.filter(is_public=True).count(),\n        'first_event': Event.objects.order_by('starts_at')[0],\n        'events': Event.objects.count(),\n        'bartender_availabilities': BartenderAvailability.objects.count(),\n        'bartender_availabilities_yes': BartenderAvailability.objects.filter(\n            availability__nature__in=(Availability.ASSIGNED, Availability.YES),\n        ).count(),\n        'bartender_availabilities_assigned': BartenderAvailability.objects.filter(\n            
availability__nature=Availability.ASSIGNED,\n ).count(),\n }\n return render(request, 'general/about.html', locals())\n\n\ndef help(request):\n return render(request, 'general/help.html')\n", "sub_path": "apps/general/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.utils.http.is_safe_url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.resolve_url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGIN_REDIRECT_URL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.translation.activate", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.translation", "line_number": 53, "usage_type": "name"}, {"api_name": "django.utils.translation.LANGUAGE_SESSION_KEY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.utils.translation", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.views.decorators.debug.sensitive_post_parameters", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGIN_URL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 71, "usage_type": "name"}, {"api_name": "forms.RegisterForm", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 82, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 89, "usage_type": "call"}, {"api_name": "apps.organization.models.Organization", "line_number": 89, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME", "line_number": 93, "usage_type": "argument"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 87, "usage_type": "name"}, {"api_name": "django.views.i18n.set_language", "line_number": 97, "usage_type": "call"}, {"api_name": 
"django.utils.translation.LANGUAGE_SESSION_KEY", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.utils.translation", "line_number": 99, "usage_type": "name"}, {"api_name": "apps.organization.models.Organization.objects.count", "line_number": 106, "usage_type": "call"}, {"api_name": "apps.organization.models.Organization.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Organization", "line_number": 106, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.count", "line_number": 107, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 107, "usage_type": "name"}, {"api_name": "apps.organization.models.Membership.objects.values", "line_number": 108, "usage_type": "call"}, {"api_name": "apps.organization.models.Membership.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Membership", "line_number": 108, "usage_type": "name"}, {"api_name": "apps.organization.models.Location.objects.count", "line_number": 109, "usage_type": "call"}, {"api_name": "apps.organization.models.Location.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Location", "line_number": 109, "usage_type": "name"}, {"api_name": "apps.organization.models.Location.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "apps.organization.models.Location.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Location", "line_number": 110, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Event.objects.order_by", "line_number": 111, "usage_type": "call"}, {"api_name": "apps.scheduling.models.Event.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Event", "line_number": 111, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Event.objects.count", "line_number": 112, "usage_type": "call"}, {"api_name": "apps.scheduling.models.Event.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Event", "line_number": 112, "usage_type": "name"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects.count", "line_number": 113, "usage_type": "call"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability", "line_number": 113, "usage_type": "name"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects.filter", "line_number": 114, "usage_type": "call"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability", "line_number": 114, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Availability.ASSIGNED", "line_number": 115, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Availability", "line_number": 115, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Availability.YES", "line_number": 115, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects.filter", "line_number": 117, "usage_type": "call"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects", "line_number": 117, "usage_type": "attribute"}, {"api_name": 
"apps.scheduling.models.BartenderAvailability", "line_number": 117, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Availability.ASSIGNED", "line_number": 118, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Availability", "line_number": 118, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "564557471", "text": "import os\nimport cProfile\n\nimport numpy as np\n\nfrom datetime import datetime\nfrom shutil import copyfile\nfrom random import random\nfrom math import floor\nfrom time import clock\n\nfrom actual_npylm import NestedPitmanYorLanguageModel\nfrom plot_utils import plot_line_graph\nfrom probability_utils import word_kl_divergence\n\n\nclass NestedPitmanYorLanguageModelWrapper:\n @staticmethod\n def load_text(path, avoid_spaces=False):\n \"\"\"\n Loads the text in the file located at path and returns it as a list of sentences. \n Also returns the symbol alphabet used in the text.\n The file must be structured as one sentence per line.\n\n :param path: Path to file containing text. \n :param avoid_spaces: Whether to ignore spaces in the generated alphabet.\n :return: Tuple of list of sentences and alphabet used in sentences.\n \"\"\"\n with open(path, \"r\", encoding=\"utf8\") as f:\n sentences = f.readlines()\n sentences = [x.strip() for x in sentences]\n\n alphabet = dict()\n for s in sentences:\n for ch in s:\n if ch not in alphabet:\n alphabet[ch] = 1\n\n # Remove space char if necessary.\n if avoid_spaces:\n if \" \" in alphabet:\n del alphabet[\" \"]\n\n return sentences, alphabet\n\n @staticmethod\n def get_mean_word_length(sentences):\n \"\"\"\n Computes the mean word length of the input list of sentences.\n \n :param sentences: List of sentences.\n :return: Mean word length.\n \"\"\"\n\n word_count = 0\n total_word_length = 0\n\n for s in sentences:\n total_word_length += len(s.replace(\" \", \"\"))\n word_count += len(s.split())\n\n return total_word_length / word_count\n\n @staticmethod\n def write_report(model_report_file_path, model_name, total_run_time, mean_run_time, data_symbol_count,\n mean_output_word_length, mean_input_word_length, correct_segmentation_percentage, kl_divergence):\n \"\"\"\n Writes the model report to the specified file path.\n \n :param model_report_file_path: Path to report file.\n :param model_name: Name of the model.\n :param total_run_time: Total run time.\n :param mean_run_time: Mean run time per iteration.\n :param data_symbol_count: The number of different symbols in the input data.\n :param mean_output_word_length: The mean output word length.\n :param mean_input_word_length: The mean input word length.\n :param correct_segmentation_percentage: The percentage of (absolutely) correct segmentations.\n :param kl_divergence: The KL divergence between the output and the input.\n \"\"\"\n\n s = \"\"\n s += \"Report for \" + model_name + \"\\n\\n\"\n s += \"\\nBenchmarks:\\nTotal run time: \\t\\t\\t\" + str(total_run_time) + \"\\n\"\n s += \"Mean run time: \\t\\t\\t\\t\" + str(mean_run_time) + \"\\n\"\n s += \"\\nPerformance:\\nData symbol count: \\t\\t\\t\" + str(data_symbol_count) + \"\\n\"\n s += \"Mean output word length: \\t\\t\" + str(mean_output_word_length) + \"\\n\"\n s += \"Mean input word length: \\t\\t\" + str(mean_input_word_length) + \"\\n\"\n s += \"Correct segmentation percentage: \\t\" + str(correct_segmentation_percentage) + \"\\n\"\n s += \"KL 
divergence: \\t\\t\\t\\t\" + str(kl_divergence) + \"\\n\"\n\n with open(model_report_file_path, \"w\") as f:\n f.write(s)\n\n @staticmethod\n def generate_plots(path, analysis_data):\n \"\"\"\n Generates plots and saves them as .png into the specified folder.\n \n :param path: Path to output folder.\n :param analysis_data: Analysis data used for generating plots. It must be a list with elements\n of form (iteration index, run time, KL divergence).\n \"\"\"\n\n path_to_runtime_plot = os.path.join(path, \"Runtime Plot.png\")\n path_to_kl_plot = os.path.join(path, \"KL Divergence Plot.png\")\n\n plot_line_graph(analysis_data, ((\"Iteration\", 0), (\"Runtime\", 1)), path_to_runtime_plot)\n plot_line_graph(analysis_data, ((\"Iteration\", 0), (\"KL Divergence\", 2)), path_to_kl_plot)\n\n @staticmethod\n def run_analysis(model_name, path_to_data, output_folder_path, iterations, max_word_length, analysis_frequency):\n \"\"\"\n Runs the NPYLM on the data found in the file at the provided path for the provided number\n of iterations. Generates analysis data with the frequency given. Outputs all results (including\n serialized models to a new folder created in the output_folder_path folder.\n \n :param model_name: The name of the model.\n :param path_to_data: Path to the file containing the segmented data.\n :param output_folder_path: Path to the folder to which to write analysis data.\n :param iterations: Number of iterations for which to run the sampler.\n :param max_word_length: Max number of symbols per word.\n :param analysis_frequency: The frequency at which to probe the sampler (e.g. for\n analysis_frequency = 10, it will generate analysis data\n at every 10th iteration.\n \"\"\"\n\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n print(\"Loaded data\\n\")\n\n # Create output folder.\n now = datetime.now()\n model_output_folder_name = \"[NPYLM][\" + model_name + \"]\" + \\\n \"[\" + str(now.day) + \"_\" + str(now.month) + \"]\" + \\\n \"[\" + str(now.hour) + \"_\" + str(now.minute) + \"]\"\n model_output_folder_path = os.path.join(output_folder_path, model_output_folder_name)\n if not os.path.exists(model_output_folder_path):\n os.makedirs(model_output_folder_path)\n print(\"Created output folder.\\n\")\n\n # Create serialization folder.\n serialization_folder = os.path.join(model_output_folder_path, \"Serialized Models\")\n if not os.path.exists(serialization_folder):\n os.makedirs(serialization_folder)\n print(\"Created serialization folder\\n\")\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, max_word_length)\n output_segmentation, analysis_data, total_run_time, mean_run_time = \\\n npylm.run_sampler(sentences, iterations, analysis_frequency, serialization_folder)\n print(\"Sampling finished.\\n\")\n\n # Copy data to output folder.\n data_copy_path = os.path.join(model_output_folder_path, \"Data Copy.txt\")\n copyfile(path_to_data, data_copy_path)\n print(\"Copied dataset to output folder.\\n\")\n\n # Write the output of the sampler.\n model_output_file_path = os.path.join(model_output_folder_path, \"Output.txt\")\n with open(model_output_file_path, \"w\") as f:\n f.writelines(output_segmentation)\n print(\"NPYLM output written to file.\\n\")\n\n # Generate statistics.\n data_symbol_count = len(alphabet)\n mean_output_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(output_segmentation)\n mean_input_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(sentences)\n 
sentences_count = len(sentences)\n correct_segmentation_percentage = sum([output_segmentation[i] == sentences[i]\n for i in range(sentences_count)]) / sentences_count\n # if analysis_data is not None:\n # kl_divergence = analysis_data[len(analysis_data)-1][2]\n # else:\n # kl_divergence = 999.9\n #\n # # Write report.\n # model_report_file_path = os.path.join(model_output_folder_path, \"Report.txt\")\n # NestedPitmanYorLanguageModelWrapper.write_report(model_report_file_path, model_name, total_run_time,\n # mean_run_time, data_symbol_count,\n # mean_output_word_length, mean_input_word_length,\n # correct_segmentation_percentage, kl_divergence)\n # print(\"Analysis report written to file.\\n\")\n #\n # # Create plots folder.\n # plots_output_folder_path = os.path.join(model_output_folder_path, \"Plots\")\n # if not os.path.exists(plots_output_folder_path):\n # os.makedirs(plots_output_folder_path)\n # print(\"Created plots folder.\\n\")\n #\n # # Generate plots.\n # #NestedPitmanYorLanguageModelWrapper.generate_plots(plots_output_folder_path, analysis_data)\n # print(\"Generated plots.\\n\")\n\n @staticmethod\n def profile_sampler(path_to_data, max_word_length):\n \"\"\"\n This function runs the sampler for one iterations in order to extract profiling information.\n \n :param path_to_data: Path to data file.\n :param max_word_length: Max allowed word length\n \"\"\"\n iterations = 1\n analysis_frequency = 20\n\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n print(\"Loaded data\\n\")\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, max_word_length)\n output_segmentation, analysis_data, total_run_time, mean_run_time = \\\n npylm.run_sampler(sentences, iterations, analysis_frequency, \"abc\")\n print(\"Sampling finished.\\n\")\n\n # avoid dead code optimization\n print(str(total_run_time))\n\n\n @staticmethod\n def resume_analysis(model_name, path_to_data, model_output_folder_path, analysis_frequency, file_index):\n # TODO: Extract common code between this and the run_analysis function.\n \"\"\"\n Loads a serialized model and resumes the analysis.\n \n :param model_name: The name of the model.\n :param path_to_data: Path to the file containing the segmented data.\n :param model_output_folder_path: Path to the model output folder.\n :param analysis_frequency: The frequency at which to probe the sampler (e.g. 
for\n analysis_frequency = 10, it will generate analysis data\n at every 10th iteration.\n :param file_index: Index of the serialization file which contains the state at which to\n resume the sampler.\n \"\"\"\n\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n print(\"Loaded data\\n\")\n\n serialization_folder_path = os.path.join(model_output_folder_path, \"Serialized Models\")\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, 0)\n output_segmentation, analysis_data, total_run_time, mean_run_time = \\\n npylm.resume_analysis(sentences, analysis_frequency, serialization_folder_path, file_index)\n print(\"Sampling finished.\\n\")\n\n # Copy data to output folder.\n data_copy_path = os.path.join(model_output_folder_path, \"Data Copy.txt\")\n copyfile(path_to_data, data_copy_path)\n print(\"Copied dataset to output folder.\\n\")\n\n # Write the output of the sampler.\n model_output_file_path = os.path.join(model_output_folder_path, \"Output.txt\")\n with open(model_output_file_path, \"w\") as f:\n f.writelines(output_segmentation)\n print(\"NPYLM output written to file.\\n\")\n\n # Generate statistics.\n data_symbol_count = len(alphabet)\n mean_output_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(output_segmentation)\n mean_input_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(sentences)\n sentences_count = len(sentences)\n correct_segmentation_percentage = sum([output_segmentation[i] == sentences[i]\n for i in range(sentences_count)]) / sentences_count\n kl_divergence = analysis_data[len(analysis_data)-1][2]\n\n # Write report.\n model_report_file_path = os.path.join(model_output_folder_path, \"Report.txt\")\n NestedPitmanYorLanguageModelWrapper.write_report(model_report_file_path, model_name, total_run_time,\n mean_run_time, data_symbol_count,\n mean_output_word_length, mean_input_word_length,\n correct_segmentation_percentage, kl_divergence)\n print(\"Analysis report written to file.\\n\")\n\n # Create plots folder.\n plots_output_folder_path = os.path.join(model_output_folder_path, \"Plots\")\n if not os.path.exists(plots_output_folder_path):\n os.makedirs(plots_output_folder_path)\n print(\"Created plots folder.\\n\")\n\n # Generate plots.\n NestedPitmanYorLanguageModelWrapper.generate_plots(plots_output_folder_path, analysis_data)\n print(\"Generated plots.\\n\")\n\n @staticmethod\n def generate_output_for_serialized_model(path_to_data, path_to_model_file, path_to_output_file):\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, 0)\n npylm._deserialize_model(path_to_model_file)\n output_segmentation = npylm.run_sampler_once(sentences, True)\n\n with open(path_to_output_file, \"w\") as f:\n f.writelines(output_segmentation)\n\n\n# NestedPitmanYorLanguageModelWrapper.run_analysis(model_name=\"Dummy Model Full Data2\",\n# path_to_data=\".\\\\mobydick.txt\",\n# output_folder_path=\".\\\\Output\",\n# iterations=1,\n# max_word_length=13,\n# analysis_frequency=20)\n\n\nNestedPitmanYorLanguageModelWrapper.profile_sampler(path_to_data=\".\\\\mobydick.txt\",\n max_word_length=13)\n\ndef fbs_sum_sample_y(r, tables):\n strength_param = 1.0\n discount_param = 1.0\n\n # This is a fast bernoulli sample + count across different trials with different parameters.\n # The parameters are given by miu = theta / (*tables[i]*d) + theta).\n miu = np.array(list(range(1, 
tables)), dtype=float)\n miu = strength_param / ((miu * discount_param) + strength_param)\n return (miu >= r).sum()\n\n\ndef sfbs_sum_sample_y(r, tables):\n a = 1.0\n b = 1.0\n\n n = floor((a/b) * ((1-r)/r))\n return min(n, tables)\n\n\ndef bernoulli_trial(mu):\n r = random()\n if r <= mu:\n return 1\n else:\n return 0\n\n\ndef bs_sum_sample_y(tables):\n strength_param = 1.0\n discount_param = 1.0\n\n sum = 0\n for i in range(1,tables+1):\n #sum += bernoulli_trial(strength_param / (strength_param + discount_param * i))\n sum += np.random.binomial(1, strength_param / (strength_param + discount_param * i))\n\n return sum\n\n\ndef compare_sampling_functions(iterations=100, tables=20, prt=None):\n sfbs_results = list()\n bs_results = list()\n sfbs_times = list()\n bs_times = list()\n for i in range(iterations):\n if prt is not None:\n print(\"\\nTest run \" + str(i))\n\n # Measure random number generation\n t_start = clock()\n r = random()\n t_end = clock()\n t_random_gen = t_end - t_start\n\n # Measure sfbs\n t_start = clock()\n result_sfbs = sfbs_sum_sample_y(r, tables)\n t_end = clock()\n t_sfbs = t_end - t_start\n\n # Measure bs\n t_start = clock()\n result_bs = bs_sum_sample_y(tables)\n t_end = clock()\n t_bs = t_end - t_start\n\n sfbs_times.append(t_sfbs)\n bs_times.append(t_bs)\n\n # Report results:\n if prt is not None:\n print(\"Times:\\tSFBS: \" + str(t_sfbs+t_random_gen) + \"\\t|\\tBS: \" + str(t_bs))\n print(\"Results:\\tSFBS: \" + str(result_sfbs) + \"\\t|\\tBS: \" + str(result_bs))\n\n # Record results for statistical analysis\n sfbs_results.append(result_sfbs)\n bs_results.append(result_bs)\n\n m_fbs = sum(sfbs_results) / len(sfbs_results)\n m_bs = sum(bs_results) / len(bs_results)\n print(\"\\nMean results:\\tSFBS: \" + str(m_fbs) + \"\\t|\\tBS: \" + str(m_bs))\n print(\"Times:\\tSFBS: \" + str(sum(sfbs_times)) + \"\\t|\\tBS: \" + str(sum(bs_times)))\n print(\"Speedup: \" + str(sum(bs_times)/sum(sfbs_times)))\n\ndef compare_means():\n its = 400\n tables = 100\n total_count1 = 0\n total_count2 = 0\n for i in range(its):\n r = random()\n total_count1 += sfbs_sum_sample_y(r, tables)\n total_count2 += bs_sum_sample_y(tables)\n\n print(\"Mean SFBS: \" + str(total_count1 / its))\n print(\"Mean BS: \" + str(total_count2 / its))\n\n", "sub_path": "npylm_wrapper.py", "file_name": "npylm_wrapper.py", "file_ext": "py", "file_size_in_byte": 17429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "plot_utils.plot_line_graph", "line_number": 107, "usage_type": "call"}, {"api_name": "plot_utils.plot_line_graph", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, 
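The closed form in sfbs_sum_sample_y above comes from solving r <= a / (a + b*i) for i, which gives i <= (a/b) * (1 - r) / r; flooring that bound and clipping it at `tables` counts every satisfied indicator at once. Each indicator keeps its Bernoulli(a / (a + b*i)) marginal whether the trials share one uniform draw or use independent ones, so the two estimators agree in expectation. A quick sanity check under the defaults a = b = 1.0 used in the functions above:

import random

tables, trials = 20, 20000
# Closed form: one shared uniform draw decides all indicators at once.
# Using 1 - random.random() keeps r in (0, 1] and avoids division by zero.
closed = sum(min(int((1.0 - r) / r), tables)
             for r in (1.0 - random.random() for _ in range(trials))) / trials
# Naive form: an independent Bernoulli(1 / (1 + i)) draw per i = 1..tables.
naive = sum(sum(random.random() <= 1.0 / (1.0 + i) for i in range(1, tables + 1))
            for _ in range(trials)) / trials
print(round(closed, 2), round(naive, 2))  # both means land near 2.6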
{"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 144, "usage_type": "call"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path", "line_number": 253, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 278, "usage_type": "call"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 325, "usage_type": "call"}, {"api_name": "random.random", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.random.binomial", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 344, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 359, "usage_type": "call"}, {"api_name": "random.random", "line_number": 360, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 361, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 365, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 367, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 371, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 373, "usage_type": "call"}, {"api_name": "random.random", "line_number": 400, "usage_type": "call"}]} +{"seq_id": "320890769", "text": "from keras.models import Model\nfrom keras.layers import Conv2D, Activation, Input\nfrom keras import optimizers\nfrom keras.models import load_model\nimport numpy as np\nimport scipy.misc\nimport scipy.ndimage\nimport cv2\nimport math\nimport glob\nimport matplotlib.pyplot as plt\n\nimg_shape = (32,32,1)\ninput_img = Input(shape=(img_shape))\nC1 = Conv2D(64,(9,9),padding='SAME',name='CONV1')(input_img)\nA1 = Activation('relu', name='act1')(C1)\nC2 = Conv2D(32,(1,1),padding='SAME',name='CONV2')(A1)\nA2 = 
Activation('relu', name='act2')(C2)\nC3 = Conv2D(1,(5,5),padding='SAME',name='CONV3')(A2)\nA3 = Activation('relu', name='act3')(C3)\nmodel = Model(input_img, A3)\nopt = optimizers.Adam(lr=0.0003)\nmodel.compile(optimizer=opt,loss='mean_squared_error')\nmodel.summary()\n\ndef modcrop(image, scale=2): #BY DEFAULT SCALE 2\n    if len(image.shape) == 3:\n        h, w, _ = image.shape\n        h = h - np.mod(h, scale)\n        w = w - np.mod(w, scale)\n        image = image[0:h, 0:w, :]\n    else:\n        h, w = image.shape\n        h = h - np.mod(h, scale)\n        w = w - np.mod(w, scale)\n        image = image[0:h, 0:w]\n    return image\n\ndef create_LR(image,scale):\n    label_ = modcrop(image, scale)\n    label_ = label_ / 255.\n    input_ = scipy.ndimage.interpolation.zoom(label_, (1./scale), prefilter=False)\n    input_ = scipy.ndimage.interpolation.zoom(input_, (scale/1.), prefilter=False)\n    return input_\n\npath = './img/yang91/'\nfiles_y = glob.glob(path + '*.*')\ntrainfiles = files_y[:60] # first 60 of the 91 images are used for training\nvalfiles = files_y[60:]   # remaining 31 images are used for validation\nimg_size = 32\nstride = 16\nX_train = []\nY_train = []\nX_val = []\nY_val = []\n\n# Extract patch image for training\nfor file_y in trainfiles:\n    tmp_y = scipy.misc.imread(file_y, flatten=True, mode='YCbCr').astype(np.float)  # same reader as the validation loop below\n    tmp_X = create_LR(tmp_y,2) #############################################################SCALE###########\n    h,w = tmp_y.shape\n    for x in range(0, h-img_size+1, stride):\n        for y in range(0, w-img_size+1, stride):\n            sub_input = tmp_X[x:x+img_size,y:y+img_size].reshape(img_size,img_size,1)\n            sub_label = tmp_y[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1)\n            X_train.append(sub_input)\n            Y_train.append(sub_label)\n\n# Extract patch image for validation\nfor file_y in valfiles:\n    tmp_y = scipy.misc.imread(file_y,flatten=True, mode='YCbCr').astype(np.float)\n    tmp_X = create_LR(tmp_y,2)###########################################################SCALE################\n    h,w = tmp_y.shape\n    for x in range(0, h-img_size+1, stride):\n        for y in range(0, w-img_size+1, stride):\n            sub_input = tmp_X[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1) # [32 x 32]\n            sub_label = tmp_y[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1) # [32 x 32]\n            X_val.append(sub_input)\n            Y_val.append(sub_label)\n\nX_train = np.array(X_train)\nY_train = np.array(Y_train)\nX_val = np.array(X_val)\nY_val = np.array(Y_val)\n\nmodel.fit(X_train, Y_train, batch_size = 128, epochs = 30, validation_data=(X_val, Y_val))\nmodel.save('wscale2.h5')\n\nimg_o = scipy.misc.imread('./img/baby_x2_GT.png',flatten=True,mode='YCbCr').astype(np.float)\nimg = create_LR(img_o,2) #################################################################SCALE#################\nimg_size = 32\nstride = 16\nh,w = img.shape\npiece_wise = []\nfor x in range(0, h-img_size+1, stride):\n    for y in range (0, w-img_size+1, stride):\n        sub_input = img[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1)\n        piece_wise.append(sub_input)\ninput_ = np.asarray(piece_wise)\nsrcnn = load_model('wscale2.h5')\nhat = srcnn.predict(input_)\nimg_re = np.zeros(img.shape)\ni = 0\nfor x in range(0, h-img_size+1, stride):\n    for y in range (0, w-img_size+1, stride):\n        img_re[x:x+img_size, y:y+img_size] = hat[i].reshape(img_size,img_size)\n        i += 1\ncv2.imwrite('restored1.bmp', img_re)\ncv2.imwrite('HR1.bmp', img_o)\nimg_save = (img*255).astype(np.uint8)\ncv2.imwrite('blurred1.bmp',img_save)\n\n#CALCULATE PSNR\noriginal = cv2.imread(\"HR1.bmp\")\nLR = 
cv2.imread(\"blurred1.bmp\")\ncontrast = cv2.imread(\"restored1.bmp\",1)\n\ndef psnr(img1, img2):\n mse = np.mean((img1-img2)**2)\n if mse ==0:\n return 100\n PIXEL_MAX = 255.0\n return 20* math.log10(PIXEL_MAX / math.sqrt(mse))\nd = psnr(original,contrast)\nprint(d)\n\nfig = plt.figure(figsize = (14,14), dpi = 100)\nax = plt.subplot(\"131\")\nax.imshow(original)\nax.set_title(\"GT\")\nplt.grid(0)\n\nax = plt.subplot(\"132\")\nax.imshow(LR)\nax.set_title(\"blurred_Image\")\nplt.grid(0)\n\nax = plt.subplot(\"133\")\nax.imshow(contrast)\nax.set_title(\"HR_RECONSTRUCTED\")\nplt.grid(0)\nplt.show()\n\n", "sub_path": "04.image_video_bm/SRCNN/srcnn_keras.py", "file_name": "srcnn_keras.py", "file_ext": "py", "file_size_in_byte": 4791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.layers.Input", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.mod", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage.interpolation.zoom", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage", "line_number": 42, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 42, "usage_type": "name"}, {"api_name": "scipy.misc.ndimage.interpolation.zoom", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 43, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage.imread", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage", "line_number": 61, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 61, "usage_type": "attribute"}, {"api_name": "scipy.misc.misc.imread", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 73, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.misc.misc.imread", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 91, "usage_type": "attribute"}, {"api_name": "scipy.misc", 
"line_number": 91, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 121, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 125, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}]} +{"seq_id": "441429683", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Registrado\n\nclass AdminRegistrado(admin.ModelAdmin):\n\tlist_display = [\"__str__\", \"nombre\", \"codigo_postal\", \"timestap\", \"actualizado\"]\n\tclass Meta:\n\t\tmodel = Registrado\n\nadmin.site.register(Registrado, AdminRegistrado)\n", "sub_path": "boletin/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Registrado", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Registrado", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "219136152", "text": "'''Utilities 
relating to installing services\n\n************************************************************************\nFOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE\nSHOULD ALSO BE APPLIED TO sdk_install IN ANY OTHER PARTNER REPOS\n************************************************************************\n'''\nimport logging\nimport time\n\nimport dcos.cosmos\nimport dcos.errors\nimport dcos.marathon\nimport dcos.packagemanager\nimport dcos.subcommand\nimport retrying\nimport shakedown\n\nimport sdk_cmd\nimport sdk_marathon\nimport sdk_plan\nimport sdk_utils\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT_SECONDS = 15 * 60\n\n'''List of services which are currently installed via install().\nUsed by post-test diagnostics to retrieve stuff from currently running services.'''\n_installed_service_names = set([])\n\n'''List of dead agents which should be ignored when checking for orphaned resources.\nUsed by uninstall when validating that an uninstall completed successfully.'''\n_dead_agent_hosts = set([])\n\n\ndef get_installed_service_names() -> set:\n '''Returns the a set of service names which had been installed via sdk_install in this session.'''\n return _installed_service_names\n\n\n@retrying.retry(stop_max_attempt_number=3,\n retry_on_exception=lambda e: isinstance(e, dcos.errors.DCOSException))\ndef _retried_install_impl(\n package_name,\n service_name,\n expected_running_tasks,\n options={},\n package_version=None,\n timeout_seconds=TIMEOUT_SECONDS):\n '''Cleaned up version of shakedown's package_install().'''\n package_manager = dcos.packagemanager.PackageManager(dcos.cosmos.get_cosmos_url())\n pkg = package_manager.get_package_version(package_name, package_version)\n\n if package_version is None:\n # Get the resolved version for logging below\n package_version = 'auto:{}'.format(pkg.version())\n\n log.info('Installing package={} service={} with options={} version={}'.format(\n package_name, service_name, options, package_version))\n\n # Trigger package install, but only if it's not already installed.\n # We expect upstream to have confirmed that it wasn't already installed beforehand.\n if sdk_marathon.app_exists(service_name):\n log.info('Marathon app={} exists, skipping package install call'.format(service_name))\n else:\n package_manager.install_app(pkg, options)\n\n # Install CLI while package starts to install\n if pkg.cli_definition():\n log.info('Installing CLI for package={}'.format(package_name))\n dcos.subcommand.install(pkg)\n\n # Wait for expected tasks to come up\n if expected_running_tasks > 0:\n shakedown.wait_for_service_tasks_running(\n service_name, expected_running_tasks, timeout_seconds)\n\n # Wait for completed marathon deployment\n app_id = pkg.marathon_json(options).get('id')\n shakedown.deployment_wait(timeout_seconds, app_id)\n\n\ndef install(\n package_name,\n service_name,\n expected_running_tasks,\n additional_options={},\n package_version=None,\n timeout_seconds=TIMEOUT_SECONDS,\n wait_for_deployment=True,\n insert_strict_options=True):\n start = time.time()\n\n # If the package is already installed at this point, fail immediately.\n if sdk_marathon.app_exists(service_name):\n raise Exception('Service is already installed: {}'.format(service_name))\n\n if insert_strict_options and sdk_utils.is_strict_mode():\n # strict mode requires correct principal and secret to perform install.\n # see also: sdk_security.py\n options = sdk_utils.merge_dictionaries({\n 'service': {\n 'service_account': 'service-acct',\n 'principal': 'service-acct',\n 
'service_account_secret': 'secret',\n 'secret_name': 'secret'\n }\n }, additional_options)\n else:\n options = additional_options\n\n # 1. Install package, wait for tasks, wait for marathon deployment\n _retried_install_impl(\n package_name,\n service_name,\n expected_running_tasks,\n options,\n package_version,\n timeout_seconds)\n\n # 2. Wait for the scheduler to be idle (as implied by deploy plan completion and suppressed bit)\n # This should be skipped ONLY when it's known that the scheduler will be stuck in an incomplete\n # state, or if the thing being installed doesn't have a deployment plan (e.g. standalone app)\n if wait_for_deployment:\n # this can take a while, default is 15 minutes. for example with HDFS, we can hit the expected\n # total task count via FINISHED tasks, without actually completing deployment\n log.info('Waiting for package={} service={} to finish deployment plan...'.format(\n package_name, service_name))\n sdk_plan.wait_for_completed_deployment(service_name, timeout_seconds)\n\n log.info('Installed package={} service={} after {}'.format(\n package_name, service_name, shakedown.pretty_duration(time.time() - start)))\n\n global _installed_service_names\n _installed_service_names.add(service_name)\n\n\n@retrying.retry(stop_max_attempt_number=5,\n wait_fixed=5000,\n retry_on_exception=lambda e: isinstance(e, Exception))\ndef _retried_run_janitor(service_name):\n auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()\n\n cmd_list = [\"docker\", \"run\", \"mesosphere/janitor\", \"/janitor.py\",\n \"-r\", sdk_utils.get_role(service_name),\n \"-p\", service_name + '-principal',\n \"-z\", sdk_utils.get_zk_path(service_name),\n \"--auth_token={}\".format(auth_token)]\n\n sdk_cmd.master_ssh(\" \".join(cmd_list))\n\n\n@retrying.retry(stop_max_attempt_number=5,\n wait_fixed=5000,\n retry_on_exception=lambda e: isinstance(e, Exception))\ndef _retried_uninstall_package_and_wait(*args, **kwargs):\n shakedown.uninstall_package_and_wait(*args, **kwargs)\n\n\ndef _verify_completed_uninstall(service_name):\n state_summary = sdk_cmd.cluster_request('GET', '/mesos/state-summary').json()\n\n # There should be no orphaned resources in the state summary (DCOS-30314)\n orphaned_resources = 0\n ignored_orphaned_resources = 0\n service_role = sdk_utils.get_role(service_name)\n for agent in state_summary['slaves']:\n # resources should be grouped by role. 
check for any resources in our expected role:\n matching_reserved_resources = agent['reserved_resources'].get(service_role)\n if matching_reserved_resources:\n if agent['hostname'] in _dead_agent_hosts:\n # The test told us ahead of time to expect orphaned resources on this host.\n log.info('Ignoring orphaned resources on agent {}/{}: {}'.format(\n agent['id'], agent['hostname'], matching_reserved_resources))\n ignored_orphaned_resources += len(matching_reserved_resources)\n else:\n log.error('Orphaned resources on agent {}/{}: {}'.format(\n agent['id'], agent['hostname'], matching_reserved_resources))\n orphaned_resources += len(matching_reserved_resources)\n if orphaned_resources:\n log.error('{} orphaned resources (plus {} ignored) after uninstall of {}'.format(\n orphaned_resources, ignored_orphaned_resources, service_name))\n log.error(state_summary)\n raise Exception('Found {} orphaned resources (plus {} ignored) after uninstall of {}'.format(\n orphaned_resources, ignored_orphaned_resources, service_name))\n elif ignored_orphaned_resources:\n log.info('Ignoring {} orphaned resources after uninstall of {}'.format(\n ignored_orphaned_resources, service_name))\n log.info(state_summary)\n else:\n log.info('No orphaned resources for role {} were found'.format(service_role))\n\n # There should be no framework entry for this service in the state summary (DCOS-29474)\n orphaned_frameworks = [fwk for fwk in state_summary['frameworks'] if fwk['name'] == service_name]\n if orphaned_frameworks:\n log.error('{} orphaned frameworks named {} after uninstall of {}: {}'.format(\n len(orphaned_frameworks), service_name, service_name, orphaned_frameworks))\n log.error(state_summary)\n raise Exception('Found {} orphaned frameworks named {} after uninstall of {}: {}'.format(\n len(orphaned_frameworks), service_name, service_name, orphaned_frameworks))\n log.info('No orphaned frameworks for service {} were found'.format(service_name))\n\n\ndef ignore_dead_agent(agent_host):\n '''Marks the specified agent as destroyed. When uninstall() is next called, any orphaned\n resources against this agent will be logged but will not result in a thrown exception.\n '''\n _dead_agent_hosts.add(agent_host)\n log.info('Added {} to expected dead agents for resource validation purposes: {}'.format(\n agent_host, _dead_agent_hosts))\n\n\ndef uninstall(package_name, service_name):\n '''Uninstalls the specified service from the cluster, and verifies that its resources and\n framework were correctly cleaned up after the uninstall has completed. Any agents which are\n expected to have orphaned resources (e.g. 
due to being shut down) should be passed to\n ignore_dead_agent() before triggering the uninstall.\n '''\n start = time.time()\n\n log.info('Uninstalling {}'.format(service_name))\n\n try:\n _retried_uninstall_package_and_wait(package_name, service_name=service_name)\n except Exception:\n log.exception('Got exception when uninstalling {}'.format(service_name))\n raise\n\n cleanup_start = time.time()\n\n try:\n if sdk_utils.dcos_version_less_than('1.10'):\n # 1.9 and earlier: Run janitor to unreserve resources\n log.info('Janitoring {}'.format(service_name))\n _retried_run_janitor(service_name)\n else:\n # 1.10 and later: Wait for uninstall scheduler to finish and be removed by Cosmos\n log.info('Waiting for Marathon app to be removed {}'.format(service_name))\n sdk_marathon.retried_wait_for_deployment_and_app_removal(\n sdk_marathon.get_app_id(service_name), timeout=TIMEOUT_SECONDS)\n except Exception:\n log.exception('Got exception when cleaning up {}'.format(service_name))\n raise\n\n finish = time.time()\n\n log.info(\n 'Uninstalled {} after pkg({}) + cleanup({}) = total({})'.format(\n service_name,\n shakedown.pretty_duration(cleanup_start - start),\n shakedown.pretty_duration(finish - cleanup_start),\n shakedown.pretty_duration(finish - start)))\n\n # Sanity check: Verify that all resources and the framework have been successfully cleaned up,\n # and throw an exception if anything is left over (uninstall bug?)\n _verify_completed_uninstall(service_name)\n\n # Finally, remove the service from the installed list (used by sdk_diag)\n global _installed_service_names\n try:\n _installed_service_names.remove(service_name)\n except KeyError:\n pass # Expected when tests preemptively uninstall at start of test\n", "sub_path": "testing/sdk_install.py", "file_name": "sdk_install.py", "file_ext": "py", "file_size_in_byte": 11282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "dcos.cosmos.packagemanager.PackageManager", "line_number": 52, "usage_type": "call"}, {"api_name": "dcos.cosmos.packagemanager", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dcos.cosmos", "line_number": 52, "usage_type": "name"}, {"api_name": "dcos.cosmos.cosmos.get_cosmos_url", "line_number": 52, "usage_type": "call"}, {"api_name": "dcos.cosmos.cosmos", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sdk_marathon.app_exists", "line_number": 64, "usage_type": "call"}, {"api_name": "dcos.cosmos.subcommand.install", "line_number": 72, "usage_type": "call"}, {"api_name": "dcos.cosmos.subcommand", "line_number": 72, "usage_type": "attribute"}, {"api_name": "dcos.cosmos", "line_number": 72, "usage_type": "name"}, {"api_name": "shakedown.wait_for_service_tasks_running", "line_number": 76, "usage_type": "call"}, {"api_name": "shakedown.deployment_wait", "line_number": 81, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 42, "usage_type": "call"}, {"api_name": "dcos.cosmos.errors", "line_number": 43, "usage_type": "attribute"}, {"api_name": "dcos.cosmos", "line_number": 43, "usage_type": "name"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "sdk_marathon.app_exists", "line_number": 96, "usage_type": "call"}, {"api_name": "sdk_utils.is_strict_mode", "line_number": 99, "usage_type": "call"}, {"api_name": "sdk_utils.merge_dictionaries", "line_number": 102, "usage_type": "call"}, {"api_name": 
"sdk_plan.wait_for_completed_deployment", "line_number": 130, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 133, "usage_type": "call"}, {"api_name": "sdk_cmd.run_cli", "line_number": 143, "usage_type": "call"}, {"api_name": "sdk_utils.get_role", "line_number": 146, "usage_type": "call"}, {"api_name": "sdk_utils.get_zk_path", "line_number": 148, "usage_type": "call"}, {"api_name": "sdk_cmd.master_ssh", "line_number": 151, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 139, "usage_type": "call"}, {"api_name": "shakedown.uninstall_package_and_wait", "line_number": 158, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 154, "usage_type": "call"}, {"api_name": "sdk_cmd.cluster_request", "line_number": 162, "usage_type": "call"}, {"api_name": "sdk_utils.get_role", "line_number": 167, "usage_type": "call"}, {"api_name": "time.time", "line_number": 220, "usage_type": "call"}, {"api_name": "time.time", "line_number": 230, "usage_type": "call"}, {"api_name": "sdk_utils.dcos_version_less_than", "line_number": 233, "usage_type": "call"}, {"api_name": "sdk_marathon.retried_wait_for_deployment_and_app_removal", "line_number": 240, "usage_type": "call"}, {"api_name": "sdk_marathon.get_app_id", "line_number": 241, "usage_type": "call"}, {"api_name": "time.time", "line_number": 246, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 251, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 252, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 253, "usage_type": "call"}]} +{"seq_id": "56311996", "text": "# -*- coding: utf-8 -*-\n\nimport re\n\nimport pytest\nfrom django.core.urlresolvers import reverse\nfrom django_dynamic_fixture import G\nfrom pyquery import PyQuery as pq\n\nfrom readthedocs.builds.constants import LATEST\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import HTMLFile, Project\nfrom readthedocs.search.tests.utils import (\n get_search_query_from_project_file,\n DATA_TYPES_VALUES,\n)\n\n\n@pytest.mark.django_db\n@pytest.mark.search\nclass TestProjectSearch:\n url = reverse('search')\n\n def _get_search_result(self, url, client, search_params):\n resp = client.get(url, search_params)\n assert resp.status_code == 200\n\n results = resp.context['results']\n facets = resp.context['facets']\n\n return results, facets\n\n def test_search_by_project_name(self, client, project, all_projects):\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': project.name },\n )\n\n assert len(results) == 1\n assert project.name.encode('utf-8') in results[0].name.encode('utf-8')\n for proj in all_projects[1:]:\n assert proj.name.encode('utf-8') not in results[0].name.encode('utf-8')\n\n def test_search_project_have_correct_language_facets(self, client, project):\n \"\"\"Test that searching project should have correct language facets in the results\"\"\"\n # Create a project in bn and add it as a translation\n G(Project, language='bn', name=project.name)\n\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': project.name },\n )\n\n lang_facets = facets['language']\n lang_facets_str = [facet[0] for facet in lang_facets]\n # There should be 2 languages\n assert len(lang_facets) == 2\n assert sorted(lang_facets_str) == sorted(['en', 'bn'])\n for facet in 
lang_facets:\n assert facet[2] == False # because none of the facets are applied\n\n def test_search_project_filter_language(self, client, project):\n \"\"\"Test that project search results are filtered according to language.\"\"\"\n # Create a project in bn and add it as a translation\n translate = G(Project, language='bn', name=project.name)\n search_params = { 'q': project.name, 'language': 'bn' }\n\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n\n # There should be only 1 result\n assert len(results) == 1\n\n lang_facets = facets['language']\n lang_facets_str = [facet[0] for facet in lang_facets]\n\n # There should be 2 languages because both `en` and `bn` should show there\n assert len(lang_facets) == 2\n assert sorted(lang_facets_str) == sorted(['en', 'bn'])\n\n\n@pytest.mark.django_db\n@pytest.mark.search\nclass TestPageSearch(object):\n url = reverse('search')\n\n def _get_search_result(self, url, client, search_params):\n resp = client.get(url, search_params)\n assert resp.status_code == 200\n\n results = resp.context['results']\n facets = resp.context['facets']\n\n return results, facets\n\n def _get_highlight(self, result, data_type):\n # if query is from page title,\n # highlighted title is present in 'result.meta.highlight.title'\n if data_type == 'title':\n highlight = result.meta.highlight.title\n\n # if result is not from page title,\n # then results and highlighted results are present inside 'inner_hits'\n else:\n inner_hits = result.meta.inner_hits\n assert len(inner_hits) >= 1\n\n # checking first inner_hit\n inner_hit_0 = inner_hits[0]\n expected_type = data_type.split('.')[0] # can be either 'sections' or 'domains'\n assert inner_hit_0['type'] == expected_type\n highlight = inner_hit_0['highlight'][data_type]\n\n return highlight\n\n def _get_highlighted_words(self, string):\n highlighted_words = re.findall(\n '<em>(.*?)</em>',\n string\n )\n return highlighted_words\n\n @pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)\n @pytest.mark.parametrize('page_num', [0, 1])\n def test_file_search(self, client, project, data_type, page_num):\n query = get_search_query_from_project_file(\n project_slug=project.slug,\n page_num=page_num,\n data_type=data_type\n )\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': query, 'type': 'file' }\n )\n assert len(results) >= 1\n\n # checking first result\n result_0 = results[0]\n highlight = self._get_highlight(result_0, data_type)\n assert len(highlight) == 1\n\n highlighted_words = self._get_highlighted_words(highlight[0])\n assert len(highlighted_words) >= 1\n for word in highlighted_words:\n # Make it lower because our search is case insensitive\n assert word.lower() in query.lower()\n\n def test_file_search_have_correct_role_name_facets(self, client):\n \"\"\"Test that searching files should return all role_names.\"\"\"\n\n # searching for 'celery' to test that\n # correct role_names are displayed\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': 'celery', 'type': 'file' }\n )\n assert len(results) >= 1\n role_name_facets = facets['role_name']\n role_name_facets_str = [facet[0] for facet in role_name_facets]\n expected_role_names = ['py:class', 'py:function', 'py:method']\n assert sorted(expected_role_names) == sorted(role_name_facets_str)\n for facet in role_name_facets:\n assert facet[2] == False # because none of the facets are applied\n\n def test_file_search_filter_role_name(self, 
client):\n \"\"\"Test that file search results are filtered according to role_names.\"\"\"\n\n search_params = { 'q': 'celery', 'type': 'file' }\n # searching without the filter\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params\n )\n assert len(results) >= 2 # there are > 1 results without the filter\n role_name_facets = facets['role_name']\n for facet in role_name_facets:\n assert facet[2] == False # because none of the facets are applied\n\n confval_facet = 'py:class'\n # checking if 'py:class' facet is present in results\n assert confval_facet in [facet[0] for facet in role_name_facets]\n\n # filtering with role_name=py:class\n search_params['role_name'] = confval_facet\n new_results, new_facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params\n )\n new_role_names_facets = new_facets['role_name']\n # there is only one result with role_name='py:class'\n # in `signals` page\n assert len(new_results) == 1\n first_result = new_results[0] # first result\n inner_hits = first_result.meta.inner_hits # inner_hits of first results\n assert len(inner_hits) >= 1\n inner_hit_0 = inner_hits[0] # first inner_hit\n assert inner_hit_0.type == 'domains'\n assert inner_hit_0.source.role_name == confval_facet\n\n for facet in new_role_names_facets:\n if facet[0] == confval_facet:\n assert facet[2] == True # because the 'py:class' filter is active\n else:\n assert facet[2] == False\n\n @pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)\n @pytest.mark.parametrize('case', ['upper', 'lower', 'title'])\n def test_file_search_case_insensitive(self, client, project, case, data_type):\n \"\"\"\n Check File search is case insensitive.\n\n It tests with uppercase, lowercase and title case.\n \"\"\"\n query_text = get_search_query_from_project_file(\n project_slug=project.slug,\n data_type=data_type\n )\n cased_query = getattr(query_text, case)\n query = cased_query()\n\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': query, 'type': 'file' }\n )\n assert len(results) >= 1\n\n first_result = results[0]\n highlight = self._get_highlight(first_result, data_type)\n assert len(highlight) == 1\n highlighted_words = self._get_highlighted_words(highlight[0])\n assert len(highlighted_words) >= 1\n for word in highlighted_words:\n assert word.lower() in query.lower()\n\n def test_file_search_exact_match(self, client, project):\n \"\"\"\n Check that a quoted query matches the exact phrase.\n\n Making a query with quoted text like ``\"foo bar\"`` should match exactly\n ``foo bar`` phrase.\n \"\"\"\n\n # `Sphinx` word is present both in `kuma` and `docs` files\n # But the phrase `Sphinx uses` is present only in `kuma` docs.\n # So search with this phrase to check\n query = r'\"Sphinx uses\"'\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': query, 'type': 'file' })\n\n # there must be only 1 result\n # because the phrase is present in\n # only one project\n assert len(results) == 1\n assert results[0].project == 'kuma'\n assert results[0].path == 'testdocumentation'\n\n inner_hits = results[0].meta.inner_hits\n assert len(inner_hits) == 1\n assert inner_hits[0].type == 'sections'\n highlight = self._get_highlight(results[0], 'sections.content')\n assert len(highlight) == 1\n highlighted_words = self._get_highlighted_words(highlight[0])\n assert len(highlighted_words) >= 1\n for word in highlighted_words:\n assert word.lower() in query.lower()\n\n def 
test_file_search_have_correct_project_facets(self, client, all_projects):\n \"\"\"Test that file search has correct project facets in results\"\"\"\n\n # `environment` word is present both in `kuma` and `docs` files\n # so search with this phrase\n query = 'environment'\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': query, 'type': 'file' },\n )\n # There should be 2 search results\n assert len(results) == 2\n project_facets = facets['project']\n project_facets_str = [facet[0] for facet in project_facets]\n assert len(project_facets_str) == 2\n\n # kuma and docs should be there\n assert sorted(project_facets_str) == sorted(['kuma', 'docs'])\n\n def test_file_search_filter_by_project(self, client):\n \"\"\"Test that search results are filtered according to project.\"\"\"\n\n # `environment` word is present both in `kuma` and `docs` files\n # so search with this phrase but filter through `kuma` project\n search_params = {\n 'q': 'environment',\n 'type': 'file',\n 'project': 'kuma'\n }\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n project_facets = facets['project']\n resulted_project_facets = [ facet[0] for facet in project_facets ]\n\n # There should be 1 search result as we have filtered\n assert len(results) == 1\n # only kuma should be there\n assert 'kuma' == results[0].project\n\n # But there should be 2 projects in the project facets\n # as the query is present in both projects\n assert sorted(resulted_project_facets) == sorted(['kuma', 'docs'])\n\n @pytest.mark.xfail(reason='Versions are not showing correctly! Fixme while rewrite!')\n def test_file_search_show_versions(self, client, all_projects, es_index, settings):\n # override the settings to index all versions\n settings.INDEX_ONLY_LATEST = False\n\n project = all_projects[0]\n # Create some versions of the project\n versions = [G(Version, project=project) for _ in range(3)]\n query = get_search_query_from_project_file(project_slug=project.slug)\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': query, 'type': 'file' },\n )\n\n # Results can be from other projects also\n assert len(results) >= 1\n\n version_facets = facets['version']\n version_facets_str = [facet[0] for facet in version_facets]\n # There should be total 4 versions\n # one is latest, and other 3 that we created above\n assert len(version_facets) == 4\n\n project_versions = [v.slug for v in versions] + [LATEST]\n assert sorted(project_versions) == sorted(version_facets_str)\n\n def test_file_search_subprojects(self, client, all_projects, es_index):\n \"\"\"\n TODO: File search should return results from subprojects also.\n\n This is currently disabled because the UX around it is weird.\n You filter by a project, and get results for multiple.\n \"\"\"\n project = all_projects[0]\n subproject = all_projects[1]\n # Add another project as subproject of the project\n project.add_subproject(subproject)\n\n # Now search with subproject content but explicitly filter by the parent project\n query = get_search_query_from_project_file(project_slug=subproject.slug)\n search_params = {\n 'q': query,\n 'type': 'file',\n 'project': project.slug,\n }\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n assert len(results) == 0\n\n def test_search_page_size(self, client, all_projects):\n query = 'are'\n search_params = {'q': query, 'type': 
'file'}\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n # There should be 3 search result\n assert len(results) == 3\n\n search_params['page_size'] = 2\n\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n\n assert len(results) == 2\n\n search_params['page_size'] = 'not_number'\n\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n\n assert len(results) == 3\n\n search_params['page_size'] = ''\n\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n\n assert len(results) == 3\n\n", "sub_path": "readthedocs/search/tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 15255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.core.urlresolvers.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "django_dynamic_fixture.G", "line_number": 48, "usage_type": "call"}, {"api_name": "readthedocs.projects.models.Project", "line_number": 48, "usage_type": "argument"}, {"api_name": "django_dynamic_fixture.G", "line_number": 67, "usage_type": "call"}, {"api_name": "readthedocs.projects.models.Project", "line_number": 67, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 90, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 122, "usage_type": "call"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 131, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 128, "usage_type": "call"}, {"api_name": "readthedocs.search.tests.utils.DATA_TYPES_VALUES", "line_number": 128, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 129, "usage_type": "attribute"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 223, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 215, "usage_type": "call"}, {"api_name": "readthedocs.search.tests.utils.DATA_TYPES_VALUES", "line_number": 215, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 215, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 216, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 216, "usage_type": "attribute"}, {"api_name": "django_dynamic_fixture.G", "line_number": 333, "usage_type": "call"}, {"api_name": "readthedocs.builds.models.Version", "line_number": 333, "usage_type": "argument"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 334, "usage_type": "call"}, {"api_name": "readthedocs.builds.constants.LATEST", "line_number": 350, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 326, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 326, "usage_type": "attribute"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 366, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 
87, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 88, "usage_type": "attribute"}]} +{"seq_id": "273947389", "text": "# -*- coding: utf-8 -*-\n###################################################################################\n#\n# Cybrosys Technologies Pvt. Ltd.\n#\n# Copyright (C) 2019-TODAY Cybrosys Technologies().\n# This program is free software: you can modify\n# it under the terms of the GNU Affero General Public License (AGPL) as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n###################################################################################\n\nimport json\n\nfrom odoo.addons.http_routing.models.ir_http import slug, unslug\nfrom odoo.addons.website.controllers.main import QueryURL\nfrom odoo.addons.website_blog.controllers.main import WebsiteBlog\n\nfrom odoo import http, fields, SUPERUSER_ID\nfrom odoo.http import request\n\n\nclass BlogInherit(WebsiteBlog):\n \"\"\"Override class WebsiteBlog\"\"\"\n @http.route(['/blog',\n '''/blog/''',\n '''/blog//page/''',\n '''/blog//tag/''',\n '''/blog//tag//page/''',\n '''/blog/search_content''',\n ], type='http', auth=\"public\", website=True, csrf=False)\n def blog(self, blog=None, tag=None, page=1, **opt):\n \"\"\"function related to blog display\"\"\"\n date_begin, date_end, state = opt.get('date_begin'), opt.get('date_end'), opt.get('state')\n published_count, unpublished_count = 0, 0\n\n domain = request.website.website_domain()\n blog_post = request.env['blog.post']\n blogs = request.env['blog.blog'].search(domain, order=\"create_date asc\", limit=2)\n # retrocompatibility to accept tag as slug\n active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] if tag else []\n if active_tag_ids:\n fixed_tag_slug = \",\".join(slug(t) for t in request.env['blog.tag'].browse(active_tag_ids))\n if fixed_tag_slug != tag:\n return request.redirect(\n request.httprequest.full_path.replace(\"/tag/%s/\" % tag, \"/tag/%s/\" % fixed_tag_slug, 1), 301)\n domain += [('tag_ids', 'in', active_tag_ids)]\n if blog:\n domain += [('blog_id', '=', blog.id)]\n if date_begin and date_end:\n domain += [(\"post_date\", \">=\", date_begin), (\"post_date\", \"<=\", date_end)]\n\n if request.env.user.has_group('website.group_website_designer'):\n count_domain = domain + [(\"website_published\", \"=\", True), (\"post_date\", \"<=\", fields.Datetime.now())]\n published_count = blog_post.search_count(count_domain)\n unpublished_count = blog_post.search_count(domain) - published_count\n\n if state == \"published\":\n domain += [(\"website_published\", \"=\", True), (\"post_date\", \"<=\", fields.Datetime.now())]\n elif state == \"unpublished\":\n domain += ['|', (\"website_published\", \"=\", False), (\"post_date\", \">\", fields.Datetime.now())]\n else:\n domain += [(\"post_date\", \"<=\", fields.Datetime.now())]\n\n blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)\n\n search_string = opt.get('search', None)\n\n blog_posts = blog_post.search([('name', 'ilike', search_string)],\n offset=(page - 1) * 
self._blog_post_per_page,\n limit=self._blog_post_per_page) if search_string \\\n else blog_post.search(domain,\n order=\"post_date desc\")\n\n pager = request.website.pager(\n url=request.httprequest.path.partition('/page/')[0],\n total=len(blog_posts),\n page=page,\n step=self._blog_post_per_page,\n url_args=opt,\n )\n pager_begin = (page - 1) * self._blog_post_per_page\n pager_end = page * self._blog_post_per_page\n blog_posts = blog_posts[pager_begin:pager_end]\n\n all_tags = request.env['blog.tag'].search([])\n use_cover = request.website.viewref('website_blog.opt_blog_cover_post').active\n fullwidth_cover = request.website.viewref('website_blog.opt_blog_cover_post_fullwidth_design').active\n offset = (page - 1) * self._blog_post_per_page\n first_post = blog_posts\n if not blog:\n first_post = blog_posts.search(domain + [('website_published', '=', True)], order=\"post_date desc, id asc\",\n limit=1)\n if use_cover and not fullwidth_cover:\n offset += 1\n\n # function to create the string list of tag ids, and toggle a given one.\n # used in the 'Tags Cloud' template.\n\n def tags_list(tag_ids, current_tag):\n tag_ids = list(tag_ids) # required to avoid using the same list\n if current_tag in tag_ids:\n tag_ids.remove(current_tag)\n else:\n tag_ids.append(current_tag)\n tag_ids = request.env['blog.tag'].browse(tag_ids).exists()\n return ','.join(slug(tags) for tags in tag_ids)\n\n tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper())\n other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tags: tags.name.upper())\n values = {\n 'blog': blog,\n 'blogs': blogs,\n 'first_post': first_post.with_prefetch(blog_posts.ids) if not search_string else None,\n 'other_tags': other_tags,\n 'state_info': {\"state\": state, \"published\": published_count, \"unpublished\": unpublished_count},\n 'active_tag_ids': active_tag_ids,\n 'tags_list': tags_list,\n 'posts': blog_posts,\n 'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts],\n 'pager': pager,\n 'nav_list': self.nav_list(blog),\n 'blog_url': blog_url,\n 'date': date_begin,\n 'tag_category': tag_category,\n }\n response = request.render(\"website_blog.blog_post_short\", values)\n return response\n\n @http.route('/blog/search', csrf=False, type=\"http\", methods=['POST', 'GET'], auth=\"public\", website=True)\n def search_contents(self, **kw):\n \"\"\"get search result for auto suggestions\"\"\"\n strings = '%' + kw.get('name') + '%'\n try:\n domain = [('website_published', '=', True)]\n blog = request.env['blog.post'].with_user(SUPERUSER_ID).search(domain)\n sql = \"\"\"select id as res_id, name as name, name as value from blog_post where name ILIKE '{}'\"\"\"\n extra_query = ''\n limit = \" limit 15\"\n qry = sql + extra_query + limit\n request.cr.execute(qry.format(strings, tuple(blog and blog.ids)))\n name = request.cr.dictfetchall()\n except:\n name = {'name': 'None', 'value': 'None'}\n return json.dumps(name)\n", "sub_path": "website_search_blog/controllers/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "odoo.addons.website_blog.controllers.main.WebsiteBlog", "line_number": 32, "usage_type": "name"}, {"api_name": "odoo.http.request.website.website_domain", "line_number": 46, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 46, "usage_type": "attribute"}, 
{"api_name": "odoo.http.request", "line_number": 46, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 47, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 47, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 48, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 48, "usage_type": "name"}, {"api_name": "odoo.addons.http_routing.models.ir_http.unslug", "line_number": 50, "usage_type": "call"}, {"api_name": "odoo.addons.http_routing.models.ir_http.slug", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 52, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 52, "usage_type": "name"}, {"api_name": "odoo.http.request.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 54, "usage_type": "name"}, {"api_name": "odoo.http.request.httprequest.full_path.replace", "line_number": 55, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 55, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 55, "usage_type": "name"}, {"api_name": "odoo.http.request.env.user.has_group", "line_number": 62, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 62, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 62, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 63, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 68, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 70, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 70, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 72, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 72, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 72, "usage_type": "name"}, {"api_name": "odoo.addons.website.controllers.main.QueryURL", "line_number": 74, "usage_type": "call"}, {"api_name": "odoo.http.request.website.pager", "line_number": 84, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 84, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 84, "usage_type": "name"}, {"api_name": "odoo.http.request.httprequest.path.partition", "line_number": 85, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 85, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 85, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 95, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 95, "usage_type": "name"}, {"api_name": "odoo.http.request.website.viewref", "line_number": 96, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 96, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 96, "usage_type": "name"}, {"api_name": "odoo.http.request.website.viewref", "line_number": 97, "usage_type": "call"}, 
{"api_name": "odoo.http.request.website", "line_number": 97, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 97, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 115, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 115, "usage_type": "name"}, {"api_name": "odoo.addons.http_routing.models.ir_http.slug", "line_number": 116, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 129, "usage_type": "call"}, {"api_name": "odoo.http.request.render", "line_number": 136, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 136, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 34, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 34, "usage_type": "name"}, {"api_name": "odoo.SUPERUSER_ID", "line_number": 145, "usage_type": "argument"}, {"api_name": "odoo.http.request.env", "line_number": 145, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 145, "usage_type": "name"}, {"api_name": "odoo.http.request.cr.execute", "line_number": 150, "usage_type": "call"}, {"api_name": "odoo.http.request.cr", "line_number": 150, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 150, "usage_type": "name"}, {"api_name": "odoo.http.request.cr.dictfetchall", "line_number": 151, "usage_type": "call"}, {"api_name": "odoo.http.request.cr", "line_number": 151, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 151, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "odoo.http.route", "line_number": 139, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 139, "usage_type": "name"}]} +{"seq_id": "416554006", "text": "from pdcresource import *\nfrom pdcglobal import *\nfrom magic import Spell\n\nclass ColdSpell(Spell):\n def __init__(self):\n Spell.__init__(self)\n self.color = BLUE\n self.type = ST_GENERIC\n \nclass FrostRay(ColdSpell):\n def __init__(self):\n ColdSpell.__init__(self)\n self.phys_cost = 15\n self.mind_cost = 30\n self.name = 'Frost Ray'\n self.infotext = 'Damage Foes with cold'\n\n def cast(self, caster):\n self.caster = caster\n self.game.wait_for_target = self\n self.game.player_actions.cursor()\n def target_choosen(self, pos):\n target = self.get_ray_target(self.caster.pos(), pos)\n if target == None:\n self.game.shout('Your spell fizzles')\n else:\n amount = d(self.caster.mind / 20) + self.caster.mind / 20\n self.game.do_damage(target, amount, D_COLD)\n self.game.shout('%s freezed %s' % (self.caster.name, target.name))\n self.game.wait_for_target = None\n self.game.state = S_RUN", "sub_path": "PDC/PDC_5/PDC/src/magic/cold_spells.py", "file_name": "cold_spells.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "magic.Spell", "line_number": 5, "usage_type": "name"}, {"api_name": "magic.Spell.__init__", "line_number": 7, "usage_type": "call"}, {"api_name": "magic.Spell", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "506827358", "text": "import json\r\n\r\nfrom flask import Flask, render_template, redirect, url_for, request\r\nfrom flask_wtf import FlaskForm\r\nfrom requests.exceptions import ConnectionError\r\nfrom wtforms import IntegerField, SelectField, StringField\r\nfrom wtforms.validators import DataRequired\r\n\r\nimport urllib.request\r\nimport json\r\n\r\nclass 
ClientDataForm(FlaskForm):\r\n sulphates = StringField('Sulphates [0.33, 2]', validators=[DataRequired()])\r\n free_sulfur_dioxide = StringField('Free Sulfur Dioxide [1, 72]', validators=[DataRequired()])\r\n total_sulfur_dioxide = StringField('Total Sulfur Dioxide [6, 289]', validators=[DataRequired()])\r\n pH = StringField('pH [2.74, 4.01]', validators=[DataRequired()])\r\n\r\n\r\napp = Flask(__name__)\r\napp.config.update(\r\n CSRF_ENABLED=True,\r\n SECRET_KEY='you-will-never-guess',\r\n)\r\n\r\ndef get_prediction(sulphates, free_sulfur_dioxide, total_sulfur_dioxide, pH):\r\n body = {\"sulphate\": [sulphates],\r\n \"free_sulfur_dioxide\":[free_sulfur_dioxide],\r\n \"total_sulfur_dioxide\": [total_sulfur_dioxide],\r\n \"pH\": [pH]}\r\n\r\n myurl = \"http://0.0.0.0:8180/predict\"\r\n req = urllib.request.Request(myurl)\r\n req.add_header('Content-Type', 'application/json; charset=utf-8')\r\n jsondata = json.dumps(body)\r\n jsondataasbytes = jsondata.encode('utf-8') # needs to be bytes\r\n req.add_header('Content-Length', len(jsondataasbytes))\r\n #print (jsondataasbytes)\r\n response = urllib.request.urlopen(req, jsondataasbytes)\r\n return json.loads(response.read())['predictions']\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/predicted/')\r\ndef predicted(response):\r\n response = json.loads(response)\r\n print(response)\r\n return render_template('predicted.html', response=response)\r\n\r\n\r\n@app.route('/predict_form', methods=['GET', 'POST'])\r\ndef predict_form():\r\n form = ClientDataForm()\r\n data = dict()\r\n if request.method == 'POST':\r\n data['sulphate'] = request.form.get('sulphate')\r\n data['free_sulfur_dioxide'] = request.form.get('free_sulfur_dioxide')\r\n data['total_sulfur_dioxide'] = request.form.get('total_sulfur_dioxide')\r\n data['pH'] = request.form.get('pH')\r\n\r\n\r\n try:\r\n response = str(get_prediction(data['sulphate'],\r\n data['free_sulfur_dioxide'],\r\n data['total_sulfur_dioxide'],\r\n data['pH']))\r\n print(response)\r\n except ConnectionError:\r\n response = json.dumps({\"error\": \"ConnectionError\"})\r\n return redirect(url_for('predicted', response=response))\r\n return render_template('form.html', form=form)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=8181, debug=True)", "sub_path": "app/front/run_front_server.py", "file_name": "run_front_server.py", "file_ext": "py", "file_size_in_byte": 2818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask_wtf.FlaskForm", "line_number": 12, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 13, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 13, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 14, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 14, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 15, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 15, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 16, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 32, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 32, "usage_type": "attribute"}, 
{"api_name": "urllib.request", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "requests.exceptions.ConnectionError", "line_number": 70, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "176466838", "text": "from django import forms\nfrom devices.models import Manufacturer\n\n\nclass ManufacturerForm(forms.ModelForm):\n name = forms.CharField(widget=forms.TextInput(\n attrs={\n 'id': 'name',\n 'class': 'form-control',\n 'placeholder': 'HP',\n\n }\n ))\n image = forms.FileField(required=False,\n label='Company Logo',\n widget=forms.FileInput(\n attrs={\n 'accept': \"image/*\",\n }))\n\n class Meta:\n model = Manufacturer\n fields = ['name', 'image']\n", "sub_path": "devices/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms.FileField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.FileInput", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": 
"name"}, {"api_name": "devices.models.Manufacturer", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "558862826", "text": "from __future__ import print_function\n\nimport os\nimport string\nimport tempfile\n\nimport pygraphviz\nfrom networkx.drawing import nx_agraph\n\nJAR_DIR = './baselines/constraint'\n\n\ndef regex2dfa(reg_ex, letter='q'):\n transfer_file = tempfile.NamedTemporaryFile(mode='w+')\n command = 'java -jar {}/regex2dfa.jar \"{}\" {}'.format(\n JAR_DIR, reg_ex, transfer_file.name)\n os.system(command)\n\n with open(transfer_file.name) as fname:\n dot = fname.read()\n print(dot, file=open('{}.dot'.format(transfer_file.name), 'w'))\n return nx_agraph.from_agraph(pygraphviz.AGraph(dot))\n\n\nclass DFA:\n def __init__(self, reg_ex):\n self.dfa = regex2dfa(reg_ex)\n self.current_state = 'q0'\n self.num_states = len(self.dfa.nodes())\n self.state_ids = dict(zip(self.dfa.nodes(), range(self.num_states)))\n\n def step(self, action):\n is_accept, self.current_state = self._traverse_dfa(\n action, self.current_state)\n return is_accept\n\n def reset(self):\n self.current_state = 'q0'\n\n def _traverse_dfa(self, char, start):\n \"\"\"\n dfa_dot: dfa in graphviz dot file\n first return value shows if next state is an accept state\n second return value is the next state\n \"\"\"\n # convert [1-2][0-9] | 3[0-5] to letter in the upper case alph.\n if char != 's' and int(char) >= 10 and int(char) <= 35:\n i = int(char) - 10\n char = '\"{}\"'.format(string.ascii_uppercase[i])\n\n dfa = self.dfa\n accept_states = [\n n for n in dfa.nodes()\n if dfa.nodes.data('shape')[n] == 'doublecircle'\n ]\n edges = dfa.edges.data('label')\n transitions = list(filter(lambda x: x[0] == start, edges))\n for transition in transitions:\n if transition[2] == str(char):\n next_state = transition[1]\n if next_state in accept_states:\n return True, next_state\n else:\n return False, next_state\n\n return False, 'q0'\n\n def states(self):\n return [str(n) for n in self.dfa.nodes()]\n\n def accepting_states(self):\n return [\n str(n) for n in self.dfa.nodes()\n if self.dfa.nodes.data('shape')[n] == 'doublecircle'\n ]\n\n def state_id(self):\n return self.state_ids[self.current_state]\n", "sub_path": "mujoco_experiments/baselines/constraint/dfa.py", "file_name": "dfa.py", "file_ext": "py", "file_size_in_byte": 2388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tempfile.NamedTemporaryFile", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "networkx.drawing.nx_agraph.from_agraph", "line_number": 22, "usage_type": "call"}, {"api_name": "networkx.drawing.nx_agraph", "line_number": 22, "usage_type": "name"}, {"api_name": "pygraphviz.AGraph", "line_number": 22, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "555666360", "text": "from django.conf.urls import url\nfrom . 
import views\napp_name = 'credit'\n\nurlpatterns =[\n # url(r'^$', views.credit, name='credit'),\n # url(r'^reyting/$', views.creditreyting, {'template_name : reyting.html'})\n url(r'^$', views.Listcredit, name='Listcreditrateinf'),\n url(r'^(?P[-\\w]+)/$', views.Listcredit, name='ListCreditViews'),\n url(r'^(?P\\d+)/(?P[-\\w]+)/$', views.DetailCredit, name='DetailCreditViews')\n\n]", "sub_path": "credit/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "277314246", "text": "import numpy as np\nimport math as math\nimport matplotlib.pyplot as plt\nimport h5py as h5py\nfrom sphere_diffusion_linear_fixedconc import sdl_analytical\n\n\ndef readh5file(h5file, nx, nr):\n length = 5.0\n radius = 0.028\n cc = []\n tt = []\n t0 = 0.0\n\n f0 = h5py.File(h5file, 'r')\n\n j = 0 \n for i in f0.keys():\n if j == 0:\n cc = f0[i][:, 0]\n j = j + 1\n else:\n cc = np.column_stack((cc, f0[i][:, 0]))\n t = i.split()\n tt.append(t0 + float(t[2]))\n f0.close()\n\n res = {}\n res['t'] = np.asarray(tt)\n res['c'] = np.asarray(cc)\n res['x'] = np.linspace(0, length, nx)\n r1 = np.linspace(radius, 0, nr+1)\n rr = np.zeros(nr)\n for i in range(nr):\n rr[i] = 0.5 * (r1[i] + r1[i+1]) \n res['r'] = rr * 10.0\n\n return res\n\n\nr1 = readh5file('sde_5x10x20.h5', 5, 10)\nr2 = readh5file('sde_5x10x40.h5', 5, 10)\nr3 = readh5file('sde_5x10x100.h5', 5, 10)\ncin = 1.0 #1.9941e-07;\nnr = 100\n\nlx = 0.08\nly = 0.90\n\nit1 = 0\nit2 = 4 \nit3 = 9 \nit4 = 19\nitt = [r1['t'][it1], r1['t'][it2], r1['t'][it3], r1['t'][it4]] \n\nra = np.linspace(0.0028, 0.28, 101)\nc4 = sdl_analytical(ra/10.0, itt, 0.028, 1.0e-6) \n\nax1 = plt.subplot(2, 2, 1)\ni = it1\nnx = 5\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\n\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 0], 'm-')\nplt.ylabel('c/c$_0$')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.text(lx, ly, '(a) t = %d s' % (r1['t'][i]), transform=ax1.transAxes)\nplt.setp(ax1.get_xticklabels(), visible=False)\n\nax2 = plt.subplot(2, 2, 2)\ni = it2\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 1], 'm-')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.setp(ax2.get_yticklabels(), visible=False)\nplt.text(lx, ly, '(b) t = %d s' % (r1['t'][i]), transform=ax2.transAxes)\nplt.setp(ax2.get_xticklabels(), visible=False)\n\nax3 = plt.subplot(2, 2, 3)\ni = it3\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 2], 'm-')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.text(lx, ly, '(c) t = %d s' % (r1['t'][i]), transform=ax3.transAxes)\nplt.xlabel('r (mm)')\nplt.ylabel('c/c$_0$')\n\nax4 = 
plt.subplot(2, 2, 4)\ni = it4\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 3], 'm-')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.text(lx, ly, '(d) t = %d s' % (r1['t'][i]), transform=ax4.transAxes)\nplt.xlabel('r (mm)')\nplt.setp(ax4.get_yticklabels(), visible=False)\nlgd = plt.legend(('nt = 20', 'nt = 50', 'nt = 100', 'Analytical'),loc=3)\nlgd.draw_frame(False)\n#txt = lgd.get_texts()\n#plt.setp(txt, fontsize='small') \n\nfig = plt.gcf()\nfig.subplots_adjust(left=0.08, right=0.95, wspace=0.05, hspace=0.08)\nfig.set_size_inches(8, 6)\nplt.savefig('prof.pdf')\nplt.show()\n", "sub_path": "tests/sde/equaldist-dt/prof.py", "file_name": "prof.py", "file_ext": "py", "file_size_in_byte": 3435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "h5py.File", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 56, "usage_type": "call"}, {"api_name": "sphere_diffusion_linear_fixedconc.sdl_analytical", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 112, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "170488911", "text": "import os\nimport sys\nsys.path.append('../')\n\nfrom dateutil.parser import parse\n\nfrom middleware.rabbitmq_queue import RabbitMQQueue\nfrom middleware.rabbitmq_queues import RabbitMQQueues\nfrom middleware.log import Logger\nfrom middleware.shenlong_status_sender import ShenlongStatusSender\n\nTWIT_ID = 0\nAUTHOR_ID = 1\nINBOUND = 2\nCREATED_AT = 3\nTEXT = 4\n\nNUM_COLUMS = 7\n\nIS_CUSTOMER = \"True\"\n\nRECEIVE_QUEUE_NAME = \"preprocesed_twits\"\nSEND_QUEUE_NAME = \"raw_twits\"\nHEALTH_QUEUE = \"pings\"\n\nclass FilterParser(object):\n def __init__(self, send_queues, receive_queue, shenlong_sender, logger):\n self.send_queues = send_queues\n self.receive_queue = receive_queue\n self.shenlong_sender = shenlong_sender\n self.logger = logger\n self.received_twits = {}\n\n def run(self):\n self.logger.log(\"Start consuming\")\n self.shenlong_sender.start()\n self.receive_queue.consume(self._callback, self._eoj_callback)\n self.logger.log(\"Sending EOM to queues\")\n self.send_queues.send_eom()\n self.logger.log(\"Finish\")\n self.shenlong_sender.stop()\n self.shenlong_sender.join()\n\n def _callback(self, ch, method, properties, decoded_body):\n self.logger.log_with_frequency(\"Received line %s\", decoded_body)\n\n body_values = decoded_body.rstrip().split(\",\")\n twit_id = body_values[TWIT_ID]\n\n if (len(body_values) != NUM_COLUMS) or (body_values[INBOUND] != IS_CUSTOMER) or (twit_id in self.received_twits):\n self.logger.log_with_frequency(\"Twit discarted\")\n ch.basic_ack(delivery_tag = method.delivery_tag)\n return\n\n day = str(parse(body_values[CREATED_AT]).date())\n\n self.logger.log_with_frequency(\"Sending parsed value\")\n\n self.send_queues.send(\"{},{},{},{}\".format(body_values[TWIT_ID], body_values[AUTHOR_ID], day, body_values[TEXT]), body_values[TWIT_ID])\n\n self.received_twits[twit_id] = True\n\n ch.basic_ack(delivery_tag = method.delivery_tag)\n\n def _eoj_callback(self, eoj_msg):\n self.logger.log(\"Received EOJ\")\n self.send_queues.send_eoj(eoj_msg)\n self.logger.log(\"Send EOJ\")\n\nif __name__ == '__main__':\n rabbitmq_host = os.environ['RABBITMQ_HOST']\n filter_parser_workers = int(os.environ['FILTER_PARSER_WORKERS'])\n analyzer_workers = int(os.environ['ANALYZER_WORKERS'])\n\n worker_id = os.environ['SERVICE_ID']\n\n log_file = os.environ['LOG_FILE']\n log_frequency = int(os.environ['LOG_FREQUENCY'])\n\n send_queues = RabbitMQQueues(RECEIVE_QUEUE_NAME, rabbitmq_host, analyzer_workers)\n receive_queue = RabbitMQQueue(\"{}{}\".format(SEND_QUEUE_NAME, worker_id), rabbitmq_host)\n health_queue = RabbitMQQueue(HEALTH_QUEUE, rabbitmq_host)\n\n shenlong_sender = ShenlongStatusSender(\"FILTER-PARSER\", worker_id, health_queue)\n logger = Logger(\"FILTER PARSER [{}]\".format(worker_id), log_file, log_frequency)\n\n worker = FilterParser(send_queues, receive_queue, shenlong_sender, logger)\n\n logger.log(\"Worker created, started running\")\n worker.run()\n logger.log(\"Worker finished, 
exiting\")\n", "sub_path": "src/filter_parser/filter_parser.py", "file_name": "filter_parser.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 55, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 78, "usage_type": "attribute"}, {"api_name": "middleware.rabbitmq_queues.RabbitMQQueues", "line_number": 80, "usage_type": "call"}, {"api_name": "middleware.rabbitmq_queue.RabbitMQQueue", "line_number": 81, "usage_type": "call"}, {"api_name": "middleware.rabbitmq_queue.RabbitMQQueue", "line_number": 82, "usage_type": "call"}, {"api_name": "middleware.shenlong_status_sender.ShenlongStatusSender", "line_number": 84, "usage_type": "call"}, {"api_name": "middleware.log.Logger", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "525774534", "text": "import cv2\nimport numpy as np\nimport csv\nimport glob\nimport os\n\nsumclass=[0,0,0,0,0,0,0,0,0]\nsave_path_pola = \"pola kalung\"\n\n#Mengambil gambar tiap folder kelas\nfor class_image_path in glob.glob(\"D:\\PycharmProjects\\PCDSAPI\\kalung sapi\\*\"):\n print(class_image_path)\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 1'): neck_class = 1\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 2'): neck_class = 2\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 3'): neck_class = 3\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 4'): neck_class = 4\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 5'): neck_class = 5\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 6'): neck_class = 6\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 7'): neck_class = 7\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 8'): neck_class = 8\n f = True\n class_folder = \"Kelas \"+str(neck_class)\n new_save_path = os.path.join(save_path_pola,class_folder)\n print(\"PATH ==\",new_save_path)\n for image_path in glob.glob(os.path.join(class_image_path, \"*.bmp\")):\n print(image_path)\n # if(neck_class!=7):\n # break\n x=str(neck_class)+\"-class-\"+str(sumclass[neck_class])\n name = x + \"-test.bmp\"\n print(name)\n print(type(name))\n im_gray = cv2.imread(image_path,0)\n thresh = 127\n im_binerr = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY)[1]\n im_gray = cv2.medianBlur(im_gray,5)\n im_biner = cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR)\n\n arr = []\n v = []\n if(neck_class==7):\n houghparam=35\n else:\n houghparam=55\n\n try:\n\n circles = cv2.HoughCircles(im_gray, cv2.HOUGH_GRADIENT, 1, 100, param1=290, param2=houghparam, minRadius=0, maxRadius=0)\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(im_biner, (i[0], i[1]), i[2], (0, 255, 255), 2)\n cv2.circle(im_biner, (i[0], i[1]), 2, (0, 0, 255), 112)\n\n flag = 1\n row, col, ch = im_biner.shape\n graykanvas = np.zeros((row, col, 1), np.uint8)\n for i in range(0, row):\n for j in range(0, col):\n b, g, r = im_biner[i, j]\n if (b == 255 & g == 0 & r == 0):\n 
graykanvas.itemset((i, j, 0), 255)\n if (flag == 1):\n x = i\n y = j\n flag = 100\n else:\n graykanvas.itemset((i, j, 0), 0)\n\n im_hasil = cv2.subtract(graykanvas, im_gray)\n\n hasil_crop = im_hasil[x:x + 112, y - 56:y + 56] # note: numpy indexing is [row, col]\n cv2.imshow(\"crop result\", hasil_crop)\n thresh = 130\n\n kernel = np.ones((5, 5), np.uint8)\n\n crop_biner = cv2.threshold(hasil_crop, thresh, 255, cv2.THRESH_BINARY)[1]\n\n\n\n cv2.imwrite(os.path.join(new_save_path,name),crop_biner)\n\n row, col= crop_biner.shape\n for r in range(0,row):\n a = 0\n for c in range(0,col):\n if crop_biner[r,c]==255:\n crop_biner[r,c]=1\n a+=crop_biner[r,c]\n v.append(a)\n # print(v)\n # print(r)\n print(len(v))\n print(\"type\", type(v))\n print(v)\n v = [n / max(v) for n in v]\n v=[int(round(l)) for l in v]\n\n arr.append(name)\n for d in v:\n arr.append(d)\n arr.append(neck_class)\n print(arr)\n\n\n csvfile = \"datavector.csv\"\n\n with open(csvfile, 'a+',newline='') as output:\n writer = csv.writer(output, lineterminator=',')\n for val in arr[:-1]:\n writer.writerow([val])\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerow([arr[-1]])\n\n sumclass[neck_class]=sumclass[neck_class]+1\n\n except Exception:\n pass\n\n if (sumclass[neck_class]==3):\n break\n", "sub_path": "hough.py", "file_name": "hough.py", "file_ext": "py", "file_size_in_byte": 4174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "glob.glob", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.HoughCircles", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.HOUGH_GRADIENT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cv2.subtract", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, 
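# The per-pixel loops in hough.py above are the slow path; this is a hedged NumPy rewrite of the
# row-count feature vector (the function name is mine, not from the original script).
import numpy as np

def row_profile(crop_biner):
    # Count white (255) pixels per row, then normalize by the peak row count
    # and round to 0/1, mirroring the manual loop plus the v/max(v) step above.
    counts = (np.asarray(crop_biner) == 255).sum(axis=1)
    peak = counts.max() if counts.max() > 0 else 1
    return [int(round(c / peak)) for c in counts]

print(row_profile(np.array([[255, 255], [255, 255], [0, 0]])))  # [1, 1, 0]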
{"api_name": "csv.writer", "line_number": 109, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "280440163", "text": "\"\"\"\nDefinition of urls for AvalFrameWeb.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\nfrom django.conf.urls import include\n\nimport app.forms\nfrom app.views import home\nfrom app.views import analise_dados\n\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = [\n # Examples:\n #url(r'^analise_dados/', include('app.urls.analise_dados.urls')),\n url(r'^$', home.home, name='home'),\n url(r'^competencias/', include('app.urls.competencias.urls')),\n url(r'^niveis_competencia_avaliacao/', include('app.urls.niveis_competencia_avaliacao.urls')),\n url(r'^aprendizagens/', include('app.urls.aprendizagens.urls')),\n url(r'^niveis_aprendizagem/', include('app.urls.niveis_aprendizagem.urls')),\n url(r'^jogos_digitais/', include('app.urls.jogos_digitais.urls')),\n url(r'^niveis_jogo/', include('app.urls.niveis_jogo.urls')),\n url(r'^aeej/', include('app.urls.aeej.urls')),\n url(r'^dispositivos_captura/', include('app.urls.dispositivos_captura.urls')),\n url(r'^jogadores/', include('app.urls.jogadores.urls')),\n url(r'^aprendizagens_aeej/', include('app.urls.aprendizagens_aeej.urls')),\n url(r'^competencias_aprendizagens/', include('app.urls.competencias_aprendizagens.urls')),\n url(r'^etapas_jogo/', include('app.urls.etapas_jogo.urls')),\n url(r'^fases_jogo/', include('app.urls.fases_jogo.urls')),\n \n \n url(r'^carga_aprendizagens/', include('app.urls.carga_aprendizagens.urls')),\n \n url(r'^geracao_relatorio/', include('app.urls.geracao_relatorio.urls')),\n \n \n\n #url(r'^login/$',\n # django.contrib.auth.views.login,\n # {\n # 'template_name': 'app/login.html',\n # 'authentication_form': app.forms.BootstrapAuthenticationForm,\n # 'extra_context':\n # {\n # 'title': 'Log in',\n # 'year': datetime.now().year,\n # }\n # },\n # name='login'),\n #url(r'^logout$',\n # django.contrib.auth.views.logout,\n # {\n # 'next_page': '/',\n # },\n # name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n]\n", "sub_path": "AvalFrameWeb/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "app.views.home.home", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.views.home", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 26, 
"usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "124486028", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-eeg-fsaverage-source-modeling:\n\nEEG forward operator with a template MRI\n========================================\n\nThis tutorial explains how to compute the forward operator from EEG data\nusing the standard template MRI subject ``fsaverage``.\n\n.. caution:: Source reconstruction without an individual T1 MRI from the\n subject will be less accurate. Do not over interpret\n activity locations which can be off by multiple centimeters.\n\n.. contents:: This tutorial covers:\n :local:\n :depth: 2\n\n\"\"\"\n# Authors: Alexandre Gramfort \n# Joan Massich \n#\n# License: BSD Style.\n\nimport os.path as op\n\nimport mne\nfrom mne.datasets import eegbci\nfrom mne.datasets import fetch_fsaverage\n\n# Download fsaverage files\nfs_dir = fetch_fsaverage(verbose=True)\nsubjects_dir = op.dirname(fs_dir)\n\n# The files live in:\nsubject = 'fsaverage'\ntrans = 'fsaverage' # MNE has a built-in fsaverage transformation\nsrc = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')\nbem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')\n\n##############################################################################\n# Load the data\n# -------------\n#\n# We use here EEG data from the BCI dataset.\n#\n# .. 
note:: See :ref:`plot_montage` to view all the standard EEG montages\n# available in MNE-Python.\n\nraw_fname, = eegbci.load_data(subject=1, runs=[6])\nraw = mne.io.read_raw_edf(raw_fname, preload=True)\n\n# Clean channel names to be able to use a standard 1005 montage\nnew_names = dict(\n (ch_name,\n ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp'))\n for ch_name in raw.ch_names)\nraw.rename_channels(new_names)\n\n# Read and set the EEG electrode locations\nmontage = mne.channels.make_standard_montage('standard_1005')\n\nraw.set_montage(montage)\nraw.set_eeg_reference(projection=True) # needed for inverse modeling\n\n# Check that the locations of EEG electrodes is correct with respect to MRI\nmne.viz.plot_alignment(\n raw.info, src=src, eeg=['original', 'projected'], trans=trans,\n show_axes=True, mri_fiducials=True, dig='fiducials')\n\n##############################################################################\n# Setup source space and compute forward\n# --------------------------------------\n\nfwd = mne.make_forward_solution(raw.info, trans=trans, src=src,\n bem=bem, eeg=True, mindist=5.0, n_jobs=1)\nprint(fwd)\n\n# for illustration purposes use fwd to compute the sensitivity map\neeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')\neeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,\n clim=dict(lims=[5, 50, 100]))\n", "sub_path": "0.21/_downloads/41f4872bb7e7ad4ec492ad557209d3d7/plot_eeg_no_mri.py", "file_name": "plot_eeg_no_mri.py", "file_ext": "py", "file_size_in_byte": 2723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mne.datasets.fetch_fsaverage", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "name"}, {"api_name": "mne.datasets.eegbci.load_data", "line_number": 50, "usage_type": "call"}, {"api_name": "mne.datasets.eegbci", "line_number": 50, "usage_type": "name"}, {"api_name": "mne.io.read_raw_edf", "line_number": 51, "usage_type": "call"}, {"api_name": "mne.io", "line_number": 51, "usage_type": "attribute"}, {"api_name": "mne.channels.make_standard_montage", "line_number": 61, "usage_type": "call"}, {"api_name": "mne.channels", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mne.viz.plot_alignment", "line_number": 67, "usage_type": "call"}, {"api_name": "mne.viz", "line_number": 67, "usage_type": "attribute"}, {"api_name": "mne.make_forward_solution", "line_number": 75, "usage_type": "call"}, {"api_name": "mne.sensitivity_map", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "283079721", "text": "# Copyright (C) 2018 Apple Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. 
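# A standalone check of the channel-name cleanup used in plot_eeg_no_mri.py above; the sample
# BCI-style channel names are assumptions for illustration.
names = ['Fp1.', 'AFz.', 'FCZ.']
new_names = {n: n.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp') for n in names}
print(new_names)  # {'Fp1.': 'Fp1', 'AFz.': 'AFz', 'FCZ.': 'FCz'}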
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging\nimport os\nimport subprocess\n\nimport ews.config as config\n\n_log = logging.getLogger(__name__)\n\n\nclass Buildbot():\n @classmethod\n def send_patch_to_buildbot(cls, patch_path, properties=[]):\n command = ['buildbot', 'try',\n '--connect=pb',\n '--master={}:{}'.format(config.BUILDBOT_SERVER_HOST, config.BUILDBOT_SERVER_PORT),\n '--username={}'.format(config.BUILDBOT_PB_USERNAME),\n '--passwd={}'.format(config.BUILDBOT_PB_PASSWORD),\n '--diff={}'.format(patch_path),\n '--repository=']\n\n for property in properties:\n command.append('--property={}'.format(property))\n\n _log.debug('Executing command: {}'.format(command))\n return_code = subprocess.call(command)\n if return_code:\n _log.warn('Error executing: {}, return code={}'.format(command, return_code))\n\n return return_code\n", "sub_path": "Tools/BuildSlaveSupport/ews-app/ews/common/buildbot.py", "file_name": "buildbot.py", "file_ext": "py", "file_size_in_byte": 2291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "ews.config.BUILDBOT_SERVER_HOST", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ews.config", "line_number": 37, "usage_type": "name"}, {"api_name": "ews.config.BUILDBOT_SERVER_PORT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ews.config.BUILDBOT_PB_USERNAME", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ews.config", "line_number": 38, "usage_type": "name"}, {"api_name": "ews.config.BUILDBOT_PB_PASSWORD", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ews.config", "line_number": 39, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "371835123", "text": "#!/usr/bin/env python\nimport os\nfrom contextlib import contextmanager\nfrom portal.utils import parse_date\nfrom dotenv import load_dotenv\nfrom portal.database import db\nfrom portal.etl.database import (\n etl_import_database,\n recruit_table,\n recruit_summary_table,\n delegate_table,\n practice_table,\n practice_group_table,\n practice_groups_practices_table,\n practice_status_table,\n exclusion_reason_table,\n)\nfrom portal.models import (\n Recruit,\n RecruitSummary,\n Delegate,\n Practice,\n PracticeGroup,\n PracticeStatus,\n ExclusionReason,\n)\nfrom portal import create_app\n\n\ndef import_practice_status():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_status (\n id INT PRIMARY 
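# A hypothetical invocation of the Buildbot.send_patch_to_buildbot classmethod defined in the
# buildbot.py record above; the patch path and property value are made up for illustration.
return_code = Buildbot.send_patch_to_buildbot('/tmp/example.diff',
                                              properties=['ews_revision=243642'])
if return_code:
    print('buildbot try failed with exit code', return_code)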
KEY,\n name VARCHAR(255)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE UNIQUE INDEX idx__etl_practice_status__name ON etl_practice_status(name);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_status_table.select()):\n imports.append(PracticeStatus(\n id=r['id'],\n name=r['name'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_recruits():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_recruit (\n status VARCHAR(255),\n nhs_number VARCHAR(255),\n study_id VARCHAR(255),\n practice_code VARCHAR(255),\n first_name VARCHAR(64),\n last_name VARCHAR(64),\n date_of_birth DATE,\n civicrm_contact_id INT,\n civicrm_case_id INT PRIMARY KEY,\n recruited_date DATE,\n invoice_year VARCHAR(255),\n invoice_quarter VARCHAR(255),\n reimbursed_status VARCHAR(255)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_recruit__nhs_number ON etl_recruit(nhs_number);\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_recruit__practice_code ON etl_recruit(practice_code);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(recruit_table.select()):\n imports.append(Recruit(\n status=r['status'],\n nhs_number=r['nhs_number'],\n study_id=r['study_id'],\n practice_code=r['practice_code'],\n first_name=r['first_name'],\n last_name=r['last_name'],\n date_of_birth=r['date_of_birth'],\n civicrm_contact_id=r['civicrm_contact_id'],\n civicrm_case_id=r['civicrm_case_id'],\n recruited_date=r['recruited_date'],\n invoice_year=r['invoice_year'],\n invoice_quarter=r['invoice_quarter'],\n reimbursed_status=r['reimbursed_status'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_recruit_summary():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_recruit_summary (\n practice_code VARCHAR(100),\n recruited INTEGER,\n excluded INTEGER,\n withdrawn INTEGER,\n last_recruited_date DATE,\n excluded_percentage DECIMAL(30, 4),\n withdrawn_percentage DECIMAL(30, 4)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_recruit_summary__practice_code ON etl_recruit_summary(practice_code);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(recruit_summary_table.select()):\n imports.append(RecruitSummary(\n practice_code=r['practice_code'],\n recruited=r['recruited'],\n excluded=int(r['excluded']),\n withdrawn=int(r['withdrawn']),\n last_recruited_date=r['last_recruited_date'],\n excluded_percentage=r['excluded_percentage'],\n withdrawn_percentage=r['withdrawn_percentage'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_delegates():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_delegate (\n practice_code VARCHAR(255),\n instance INT,\n name VARCHAR(255),\n role VARCHAR(255),\n gcp_trained BIT,\n gv_trained BIT,\n on_delegation_log_yn BIT,\n gv_start_del_log DATE,\n gv_end_del_log DATE,\n rsn_not_on_del_log VARCHAR(500),\n gv_phone_a VARCHAR(100),\n gv_phone_b VARCHAR(100),\n contact_email_add VARCHAR(100),\n primary_contact_yn BIT\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_delegates__practice_code ON etl_delegate(practice_code);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(delegate_table.select()):\n imports.append(Delegate(\n practice_code=r['practice_code'],\n instance=r['instance'],\n name=r['name'],\n 
role=r['role'],\n gcp_trained=r['gcp_trained'],\n gv_trained=r['gv_trained'],\n on_delegation_log_yn=r['on_delegation_log_yn'],\n gv_start_del_log=parse_date(r['gv_start_del_log']),\n gv_end_del_log=parse_date(r['gv_end_del_log']),\n gv_phone_a=r['gv_phone_a'],\n gv_phone_b=r['gv_phone_b'],\n contact_email_add=r['contact_email_add'],\n primary_contact_yn=r['primary_contact_yn'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_practices():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_detail (\n project_id INT,\n ccg INT,\n federation INT,\n code VARCHAR(255),\n name VARCHAR(255),\n street_address VARCHAR(255),\n town VARCHAR(255),\n city VARCHAR(255),\n county VARCHAR(255),\n postcode VARCHAR(255),\n partners VARCHAR(255),\n collab_ag_comp_yn BIT,\n collab_ag_signed_date_str VARCHAR(100),\n isa_comp_yn BIT,\n isa_1_signed_date_str VARCHAR(255),\n isa_1_caldicott_guard_end_str VARCHAR(255),\n agree_66_comp_yn BIT,\n agree_66_signed_date_1_str VARCHAR(255),\n agree_66_end_date_2_str VARCHAR(255),\n genvasc_initiated BIT,\n status_id INT NULL\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_detail__practice_code ON etl_practice_detail(code);\n \"\"\")\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_detail__ccg ON etl_practice_detail(ccg);\n \"\"\")\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_detail__federation ON etl_practice_detail(federation);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_table.select()):\n imports.append(Practice(\n project_id=r['project_id'],\n code=r['code'],\n name=r['name'],\n ccg=r['ccg'],\n street_address=r['street_address'],\n town=r['town'],\n city=r['city'],\n county=r['county'],\n postcode=r['postcode'],\n federation=r['federation'],\n partners=r['partners'],\n collab_ag_comp_yn=r['collab_ag_comp_yn'],\n collab_ag_signed_date_str=parse_date(r['collab_ag_signed_date_str']),\n isa_comp_yn=r['isa_comp_yn'],\n isa_1_signed_date_str=parse_date(r['isa_1_signed_date_str']),\n isa_1_caldicott_guard_end_str=parse_date(r['isa_1_caldicott_guard_end_str']),\n agree_66_comp_yn=r['agree_66_comp_yn'],\n agree_66_signed_date_1_str=parse_date(r['agree_66_signed_date_1_str']),\n agree_66_end_date_2_str=parse_date(r['agree_66_end_date_2_str']),\n genvasc_initiated=r['genvasc_initiated'] in ('1', 1),\n status_id=r['status_id'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_practice_groups():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_group (\n project_id INT,\n identifier VARCHAR(255),\n type VARCHAR(255),\n name VARCHAR(255),\n PRIMARY KEY (project_id, identifier, type)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_group__type ON etl_practice_group(type);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_group_table.select()):\n imports.append(PracticeGroup(\n project_id=r['project_id'],\n type=r['type'],\n identifier=r['identifier'],\n name=r['name'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_practice_groups_practices():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_groups_practices (\n practice_group_type VARCHAR(200),\n practice_group_project_id INT,\n practice_group_identifier INT,\n practice_code VARCHAR(255),\n PRIMARY KEY (practice_group_type, 
practice_group_project_id, practice_group_identifier, practice_code)\n );\n \"\"\")\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_groups_practices_table.select()):\n try:\n p = Practice.query.filter_by(code=r['practice_code']).one()\n pg = PracticeGroup.query.filter_by(\n type=r['practice_group_type'],\n project_id=r['practice_group_project_id'],\n identifier=r['practice_group_identifier'],\n ).one()\n\n pg.practices.add(p)\n db.session.add(pg)\n except:\n print(r['practice_group_type'])\n print(r['practice_group_project_id'])\n print(r['practice_group_identifier'])\n\n db.session.commit()\n\n\ndef import_exclusion_reasons():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_exclusion_reason (\n civicrm_case_id INT PRIMARY KEY,\n details VARCHAR(500)\n );\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(exclusion_reason_table.select()):\n imports.append(ExclusionReason(\n civicrm_case_id=r['civicrm_case_id'],\n details=r['details'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\n# Load environment variables from '.env' file.\nload_dotenv()\n\napp = create_app()\ncontext = app.app_context()\ncontext.push()\n\nimport_practice_status()\nimport_practice_groups()\nimport_practices()\nimport_recruits()\nimport_recruit_summary()\nimport_delegates()\nimport_practice_groups_practices()\nimport_exclusion_reasons()\n\ncontext.pop()\n", "sub_path": "dev_import.py", "file_name": "dev_import.py", "file_ext": "py", "file_size_in_byte": 11395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "portal.database.db.engine.execute", "line_number": 31, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 31, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 31, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 38, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 38, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 38, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 44, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_status_table.select", "line_number": 45, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_status_table", "line_number": 45, "usage_type": "name"}, {"api_name": "portal.models.PracticeStatus", "line_number": 46, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 51, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 51, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 51, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 52, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 52, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 52, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 54, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 54, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 58, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 58, "usage_type": "attribute"}, 
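# Every import_* helper in dev_import.py above repeats the same create-table / select / bulk-add /
# commit shape; this is a hedged sketch of that shared pattern (run_import and its parameter names
# are mine, not part of the original script).
def run_import(create_sql, source_table, row_to_model):
    # Create the target table, stream rows from the ETL source, map each
    # row to a model instance, then persist them in one transaction.
    db.engine.execute(create_sql)
    with etl_import_database() as r_db:
        imports = [row_to_model(r) for r in r_db.execute(source_table.select())]
    db.session.add_all(imports)
    db.session.commit()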
{"api_name": "portal.database.db", "line_number": 58, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 76, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 76, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 76, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 80, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 80, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 80, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 86, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_table.select", "line_number": 87, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_table", "line_number": 87, "usage_type": "name"}, {"api_name": "portal.models.Recruit", "line_number": 88, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 104, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 104, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 104, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 105, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 105, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 105, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 107, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 107, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 107, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 111, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 111, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 111, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 123, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 123, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 123, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 129, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_summary_table.select", "line_number": 130, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_summary_table", "line_number": 130, "usage_type": "name"}, {"api_name": "portal.models.RecruitSummary", "line_number": 131, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 141, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 141, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 141, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 142, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 142, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 142, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 144, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 144, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 144, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 148, "usage_type": "call"}, 
{"api_name": "portal.database.db.engine", "line_number": 148, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 148, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 167, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 167, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 167, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 173, "usage_type": "call"}, {"api_name": "portal.etl.database.delegate_table.select", "line_number": 174, "usage_type": "call"}, {"api_name": "portal.etl.database.delegate_table", "line_number": 174, "usage_type": "name"}, {"api_name": "portal.models.Delegate", "line_number": 175, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 183, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 184, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 191, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 191, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 191, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 192, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 192, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 192, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 194, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 194, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 194, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 198, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 198, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 198, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 224, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 224, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 224, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 227, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 227, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 227, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 230, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 230, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 230, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 236, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_table.select", "line_number": 237, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_table", "line_number": 237, "usage_type": "name"}, {"api_name": "portal.models.Practice", "line_number": 238, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 251, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 253, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 254, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 256, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", 
"line_number": 257, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 262, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 262, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 262, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 263, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 263, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 263, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 265, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 265, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 265, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 269, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 269, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 269, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 279, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 279, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 279, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 285, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_group_table.select", "line_number": 286, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_group_table", "line_number": 286, "usage_type": "name"}, {"api_name": "portal.models.PracticeGroup", "line_number": 287, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 294, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 294, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 294, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 295, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 295, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 295, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 297, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 297, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 297, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 301, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 301, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 301, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 311, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_groups_practices_table.select", "line_number": 312, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_groups_practices_table", "line_number": 312, "usage_type": "name"}, {"api_name": "portal.models.Practice.query.filter_by", "line_number": 314, "usage_type": "call"}, {"api_name": "portal.models.Practice.query", "line_number": 314, "usage_type": "attribute"}, {"api_name": "portal.models.Practice", "line_number": 314, "usage_type": "name"}, {"api_name": "portal.models.PracticeGroup.query.filter_by", "line_number": 315, "usage_type": "call"}, {"api_name": "portal.models.PracticeGroup.query", 
"line_number": 315, "usage_type": "attribute"}, {"api_name": "portal.models.PracticeGroup", "line_number": 315, "usage_type": "name"}, {"api_name": "portal.database.db.session.add", "line_number": 322, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 322, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 322, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 328, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 328, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 328, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 332, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 332, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 332, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 341, "usage_type": "call"}, {"api_name": "portal.etl.database.exclusion_reason_table.select", "line_number": 342, "usage_type": "call"}, {"api_name": "portal.etl.database.exclusion_reason_table", "line_number": 342, "usage_type": "name"}, {"api_name": "portal.models.ExclusionReason", "line_number": 343, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 348, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 348, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 348, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 349, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 349, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 349, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 351, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 351, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 351, "usage_type": "name"}, {"api_name": "dotenv.load_dotenv", "line_number": 355, "usage_type": "call"}, {"api_name": "portal.create_app", "line_number": 357, "usage_type": "call"}]} +{"seq_id": "284114919", "text": "\"\"\"andapp URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom webapp.views import comment, message_user, message_customer, friend_delete, friend_add, busket, busket_delete, dream, dream_delete, like, logout, check_user, insert_user, index_main_content, index_customer_main, index_main, index_registration, index_search, index_profile, index_busket, index_dream, index_store, index_about, customer_main, customer_profile, customer_goods, customer_messages, customer_statistics, customer_main_content, customer_main_content_edit, insert_customer, check_customer, insert_product, update_product\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', index_main),\n url(r'^index_main/$', index_main),\n url(r'^registration/$', index_registration),\n url(r'^search/$', index_search),\n url(r'^logout/$', logout),\n\n url(r'^index_main_content/$', index_main_content),\n url(r'^index_customer_main/$', index_customer_main),\n url(r'^profile/$', index_profile),\n url(r'^basket/$', index_busket), \n url(r'^dream/$', index_dream),\n url(r'^store/$', index_store), \n url(r'^about/$', index_about), \n url(r'^insert_user/$', insert_user),\n url(r'^check_user/$', check_user),\n url(r'^like/$', like),\n url(r'^dream_insert/$', dream),\n url(r'^dream_delete/$', dream_delete),\n url(r'^busket/$', busket), \n url(r'^busket_delete/$', busket_delete),\n url(r'^friend_add/$', friend_add),\n url(r'^friend_delete/$', friend_delete), \n url(r'^message_user/$', message_user),\n url(r'^message_customer/$', message_customer),\n url(r'^comment/$', comment), \n \n \n\n url(r'^customer_main/$', customer_main),\n url(r'^customer_profile/$', customer_profile),\n url(r'^customer_goods/$', customer_goods),\n url(r'^customer_messages/$', customer_messages),\n url(r'^customer_statistics/$', customer_statistics),\n \n url(r'^insert_customer/$', insert_customer),\n url(r'^check_customer/$', check_customer),\n url(r'^update_product/$', update_product),\n url(r'^insert_product/$', insert_product),\n\n url(r'^customer_main_content/$', customer_main_content),\n url(r'^customer_main_content_edit/$', customer_main_content_edit),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "andapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2935, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "webapp.views.index_main", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "webapp.views.index_main", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "webapp.views.index_registration", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "webapp.views.index_search", "line_number": 
26, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "webapp.views.logout", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "webapp.views.index_main_content", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "webapp.views.index_customer_main", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "webapp.views.index_profile", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "webapp.views.index_busket", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "webapp.views.index_dream", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "webapp.views.index_store", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "webapp.views.index_about", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "webapp.views.insert_user", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "webapp.views.check_user", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "webapp.views.like", "line_number": 38, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "webapp.views.dream", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "webapp.views.dream_delete", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "webapp.views.busket", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 42, "usage_type": "call"}, {"api_name": "webapp.views.busket_delete", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "webapp.views.friend_add", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "webapp.views.friend_delete", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": "webapp.views.message_user", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "webapp.views.message_customer", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "webapp.views.comment", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "webapp.views.customer_main", "line_number": 51, "usage_type": "argument"}, {"api_name": 
"django.conf.urls.url", "line_number": 52, "usage_type": "call"}, {"api_name": "webapp.views.customer_profile", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}, {"api_name": "webapp.views.customer_goods", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 54, "usage_type": "call"}, {"api_name": "webapp.views.customer_messages", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 55, "usage_type": "call"}, {"api_name": "webapp.views.customer_statistics", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 57, "usage_type": "call"}, {"api_name": "webapp.views.insert_customer", "line_number": 57, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 58, "usage_type": "call"}, {"api_name": "webapp.views.check_customer", "line_number": 58, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 59, "usage_type": "call"}, {"api_name": "webapp.views.update_product", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 60, "usage_type": "call"}, {"api_name": "webapp.views.insert_product", "line_number": 60, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 62, "usage_type": "call"}, {"api_name": "webapp.views.customer_main_content", "line_number": 62, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 63, "usage_type": "call"}, {"api_name": "webapp.views.customer_main_content_edit", "line_number": 63, "usage_type": "argument"}, {"api_name": "django.conf.urls.static.static", "line_number": 65, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 65, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 65, "usage_type": "attribute"}]} +{"seq_id": "582642831", "text": "\"\"\"\nGeneral purpose helper functions.\n\"\"\"\nimport os\nfrom configparser import RawConfigParser\nfrom pathlib import Path\nfrom timeit import default_timer as timer\n\n\ndef get_config():\n \"\"\"\n Looks for a config file using the APP_ENV environment\n variable and reads in the configuration as a dict.\n :return: dict(cfg dict, root, cfg_path)\n \"\"\"\n # set root directory for the app (this directory, that is)\n root = Path.cwd()\n\n # setup configuration file path using the APP_ENV environment variable\n cfg_path = root / 'config' / '{}.ini'.format(os.environ.get('APP_ENV'))\n cfg_parser = RawConfigParser()\n\n # read .ini file for the appropriate app setup (dev, prod or test)\n cfg_parser.read(cfg_path)\n\n # create a dict with the config\n cfg_dict = {x: dict(cfg_parser.items(x)) for x in cfg_parser.sections()}\n return {\"cfg\": cfg_dict, \"root\": root, \"cfg_path\": cfg_path}\n\n\ndef time_func_perf(func, func_args=None, func_kwargs=None) -> float:\n \"\"\"\n Return the time elapsed between start and end, calling a func in\n between them.\n :param func: function to be called\n :param func_args: arguments to be passed to the function\n :param func_kwargs: keyword arguments to passed to the function\n :return: time in fractional seconds\n \"\"\"\n if func_args and func_kwargs:\n start = timer()\n func(*func_args, **func_kwargs)\n stop = timer()\n return stop - start\n\n if func_args and not func_kwargs:\n start = 
timer()\n func(*func_args)\n stop = timer()\n return stop - start\n\n if func_kwargs and not func_args:\n start = timer()\n func(**func_kwargs)\n stop = timer()\n return stop - start\n\n if not func_args and not func_kwargs:\n start = timer()\n func()\n stop = timer()\n return stop - start\n", "sub_path": "helpers/general.py", "file_name": "general.py", "file_ext": "py", "file_size_in_byte": 1851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pathlib.Path.cwd", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "configparser.RawConfigParser", "line_number": 21, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 41, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 43, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 47, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 49, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 53, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 55, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 59, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "42454460", "text": "import os\n\nfrom gym.envs.registration import register\nfrom collections import OrderedDict\nfrom minerl.env import spaces\nfrom minerl.env.core import MineRLEnv\n\nimport numpy as np\nmissions_dir = os.path.dirname(__file__)\nnavigate_observation_space = spaces.Dict({\n 'pov': spaces.Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),\n 'inventory': spaces.Dict({\n 'dirt': spaces.Box(low=0, high=2304, shape=(), dtype=np.int)\n }),\n 'compassAngle': spaces.Box(low=-179.0, high=180.0, shape=(), dtype=np.float32)\n})\n\nnavigate_action_space = spaces.Dict({\n \"forward\": spaces.Discrete(2),\n \"back\": spaces.Discrete(2),\n \"left\": spaces.Discrete(2),\n \"right\": spaces.Discrete(2),\n \"jump\": spaces.Discrete(2),\n \"sneak\": spaces.Discrete(2),\n \"sprint\": spaces.Discrete(2),\n \"attack\": spaces.Discrete(2),\n \"camera\": spaces.Box(low=-180, high=180, shape=(2,), dtype=np.float32),\n \"place\": spaces.Enum('none', 'dirt')})\n\n\nregister(\n id='MineRLSimple-v0',\n entry_point='minerl.env:MineRLEnv',\n kwargs={\n 'xml': os.path.join(missions_dir, 'navigationDenseFixedMap.xml'),\n 'observation_space': navigate_observation_space,\n 'action_space': navigate_action_space,\n },\n max_episode_steps=600,\n)\n", "sub_path": "SimpleEnvironment/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Dict", "line_number": 10, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 10, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Box", "line_number": 11, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 11, "usage_type": "attribute"}, 
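# The four branches of time_func_perf above collapse to one if the argument containers are
# defaulted; a hedged equivalent sketch (the compact name is mine):
from timeit import default_timer as timer

def time_func_perf_compact(func, func_args=None, func_kwargs=None) -> float:
    # An empty tuple/dict unpacks to "no arguments", so one code path covers
    # all four presence/absence combinations.
    start = timer()
    func(*(func_args or ()), **(func_kwargs or {}))
    return timer() - start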
{"api_name": "minerl.env.spaces.Dict", "line_number": 12, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 12, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Box", "line_number": 13, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.int", "line_number": 13, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Box", "line_number": 15, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Dict", "line_number": 18, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 18, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 19, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 19, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 20, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 20, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 21, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 21, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 22, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 22, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 23, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 23, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 24, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 24, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 25, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 25, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 26, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 26, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Box", "line_number": 27, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Enum", "line_number": 28, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 28, "usage_type": "name"}, {"api_name": "gym.envs.registration.register", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "198440922", "text": "\"\"\"\n代码链接:https://github.com/ksivaman/Transfer-image-styling\n代码详解链接:https://youyou-tech.com/2019/10/01/%E4%BB%A3%E7%A0%81%E8%AF%A6%E8%A7%A3%EF%BC%9A%E5%9C%A8Pytorch%E5%92%8CPython/\n\"\"\"\n\n\n\"\"\"第一步:涵盖所有必要的库\"\"\"\nfrom PIL import Image\nfrom io import BytesIO\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nimport torch\nimport torch.optim as optim\nimport requests\nfrom torchvision import transforms, models\n\n\"\"\"第二步:因为将不会对网络进行训练,在Pytorch中初始化预训练的VGG19模型并冻结所有模型参数,如果NVIDIA GPUs可用,移动模型到cuda。\"\"\"\n\nstrt = time.clock()\n# get the \"features\" portion of VGG19\nvgg = models.vgg19(pretrained=True).features\n\n# freeze VGG params to avoid chanhe\nfor param in vgg.parameters():\n param.requires_grad_(False)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else 
\"cpu\")\nvgg.to(device)\n\n\ndef load_image(img_path, max_size=400, shape=None):\n ''' Load in and transform an image, making sure the image\n is <= 400 pixels in the x-y dims.'''\n if \"http\" in img_path:\n response = requests.get(img_path)\n image = Image.open(BytesIO(response.content)).convert('RGB')\n else:\n image = Image.open(img_path).convert('RGB')\n \n # large images will slow down processing\n if max(image.size) > max_size:\n size = max_size\n else:\n size = max(image.size)\n \n if shape is not None:\n size = shape\n\n in_transform = transforms.Compose([\n transforms.Resize(size),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n\n # discard the transparent, alpha channel (that's the :3) and add the batch dimension\n image = in_transform(image)[:3,:,:].unsqueeze(0)\n return image\n\n# load in content and style image\n# content = load_image('imgs/tanya_deepak.jpg').to(device)\ncontent = load_image(r'F:\\\\PyStyle\\\\Transfer-image-styling\\\\imgs\\\\cat_small.jpg').to(device)\n\n# Resize style to match content, makes code easier\n# style = load_image('imgs/cat_small_abstract.jpg', shape=content.shape[-2:]).to(device)\nstyle = load_image(r'F:\\\\PyStyle\\\\Transfer-image-styling\\imgs\\\\tanya_deepak_the_scream.jpg', shape=content.shape[-2:]).to(device)\n\n\ndef im_convert(tensor):\n \"\"\"\n Display a tensor as an image.\n 将张量转换为图像\n \"\"\"\n \n image = tensor.to(\"cpu\").clone().detach()\n image = image.numpy().squeeze()\n image = image.transpose(1,2,0)\n image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))\n image = image.clip(0, 1)\n\n return image\n\n\n\"\"\"第三步:定义一个函数以从VGG19网络中提取特征。图层字典中的图层名称是PyTorch预培训的VGG19模型中的预定义名称。\"\"\"\ndef get_features(image, model, layers=None):\n \"\"\" Run an image forward through a model and get the features for \n a set of layers. 
Default layers are for VGGNet matching Gatys et al (2016)\n\n 运行一个图像通过一个模型向前,并得到的特征图层。默认层用于VGGNet匹配Gatys等\n \"\"\"\n \n ## Need the layers for the content and style representations of an image\n if layers is None:\n layers = {'0': 'conv1_1',\n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1',\n '21': 'conv4_2', ## content representation\n '28': 'conv5_1'}\n \n features = {}\n x = image\n # model._modules is a dictionary holding each module in the model\n for name, layer in model._modules.items():\n x = layer(x)\n if name in layers:\n features[layers[name]] = x\n \n return features\n\n\n\"\"\"第四步:给定特征映射作为张量,定义一个函数来计算gram矩阵。\"\"\"\ndef gram_matrix(tensor):\n\n # 获取张量的 batch_size, depth, height, width\n _, d, h, w = tensor.size()\n \n # reshape so we're multiplying the features for each channel\n tensor = tensor.view(d, h * w)\n \n # calculate the gram matrix\n gram = torch.mm(tensor, tensor.t())\n \n return gram\n\n\"\"\"第五步:获取风格和内容图像的特征,获取风格损失的gram矩阵,将目标图像初始化为风格图像,从5 个gram矩阵的MSE中为损失的线性组合设置风格权重,为两个损失的相对重要性设置内容权重和风格权重(上面的风格损失图像中为“a”),选择用于反向传播的优化器,并设置迭代和修改目标图像的步骤数。\"\"\"\n# get content and style features only once before training\ncontent_features = get_features(content, vgg)\nstyle_features = get_features(style, vgg)\n\n# calculate the gram matrices for each layer of our style representation\nstyle_grams = {layer: gram_matrix(style_features[layer]) for layer in style_features}\n\n# create a third \"target\" image and prep it for change\n# it is a good idea to start of with the target as a copy of our *content* image\n# then iteratively change its style\ntarget = content.clone().requires_grad_(True).to(device)\n\nstyle_weights = {'conv1_1': 1.,\n 'conv2_1': 0.75,\n 'conv3_1': 0.2,\n 'conv4_1': 0.2,\n 'conv5_1': 0.2}\n\ncontent_weight = 1 # alpha\nstyle_weight = 1e6 # beta\n\n# iteration hyperparameters\noptimizer = optim.Adam([target], lr=0.003)\nsteps = 200 # decide how many iterations to update your image (5000)\n\n\n\"\"\"第六步:在保持最小损失的同时,迭代修改目标图像。减少“操作步骤”的次数。\"\"\"\n\nfrom tqdm import tqdm\nfor ii in tqdm(range(1, steps+1)):\n \n # get the features from your target image\n target_features = get_features(target, vgg)\n \n # the content loss\n content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)\n \n # the style loss\n # initialize the style loss to 0\n style_loss = 0\n # then add to it for each layer's gram matrix loss\n for layer in style_weights:\n # get the \"target\" style representation for the layer\n target_feature = target_features[layer]\n target_gram = gram_matrix(target_feature)\n _, d, h, w = target_feature.shape\n # get the \"style\" style representation\n style_gram = style_grams[layer]\n # the style loss for one layer, weighted appropriately\n layer_style_loss = style_weights[layer] * torch.mean((target_gram - style_gram)**2)\n # add to the style loss\n style_loss += layer_style_loss / (d * h * w)\n \n # calculate the *total* loss\n total_loss = content_weight * content_loss + style_weight * style_loss\n \n # update your target image\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n# 最终的图像\nfinal = im_convert(target)\n# 存储\nmatplotlib.image.imsave('F:\\\\PyStyle\\\\Transfer-image-styling\\\\imgs\\\\cat_style.jpg', final)\n\nend = time.clock()\n\nprint(\"时间是:%d秒\"%(end-strt))\n", "sub_path": "transfer.py", "file_name": "transfer.py", "file_ext": "py", "file_size_in_byte": 7113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "time.clock", 
"line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.models.vgg19", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 52, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 53, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 53, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 55, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 151, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.image.imsave", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 193, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "486614595", "text": "from __future__ import absolute_import\n\nimport logging\nimport threading\nfrom rainbow_logging_handler import RainbowLoggingHandler\nimport sys\nfrom docker_conduct.util import synchronized\n\n__author__ = 'nick'\n\n\nclass LevelRangeFilter(logging.Filter):\n\t\"\"\"Specify log level range to accept\"\"\"\n\tdef __init__(self, low, high):\n\t\tsuper(LevelRangeFilter, self).__init__()\n\n\t\tself.low = low\n\t\tself.high = high\n\n\tdef filter(self, record):\n\t\treturn self.low <= record.levelno <= self.high\n\n\n__CONFIGURED_LOGGERS = set()\n\n@synchronized\ndef configure_basic_logging(logger):\n\t\"\"\"\n\tIdempotent and thread-safe basic logging configuration for provided logger object\n\n\tReturns True if logger was configured, False if logger had been previously configured\n\t\"\"\"\n\tif logger not in __CONFIGURED_LOGGERS:\n\t\t__CONFIGURED_LOGGERS.add(logger)\n\n\t\tformatter = logging.Formatter('%(message)s')\n\n\t\tstdout_handler = RainbowLoggingHandler(stream=sys.stdout)\n\t\tstderr_handler = RainbowLoggingHandler(stream=sys.stderr)\n\n\t\tstdout_handler.setFormatter(formatter)\n\t\tstderr_handler.setFormatter(formatter)\n\n\t\tstdout_filter = LevelRangeFilter(logging.DEBUG, logging.INFO)\n\t\tstderr_filter = 
LevelRangeFilter(logging.WARNING, logging.CRITICAL)\n\n\t\tstdout_handler.addFilter(stdout_filter)\n\t\tstderr_handler.addFilter(stderr_filter)\n\n\t\tstdout_handler.setLevel(logging.DEBUG)\n\t\tstderr_handler.setLevel(logging.DEBUG)\n\n\t\tlogger.addHandler(stdout_handler)\n\t\tlogger.addHandler(stderr_handler)\n\n\t\treturn True\n\n\telse:\n\t\treturn False\n\n\nclass LoggingMixin(object):\n\t\"\"\"Mixin to provide a single preconfigured logger with sensible defaults on class instances\"\"\"\n\n\t__logger = None\n\n\tauto_configure_basic_logging = True\n\tlog_level = logging.DEBUG\n\n\t@property\n\tdef logger(self):\n\t\t\"\"\"Load, cache, and return a logger object. By default, also performs basic configuration on the logger\"\"\"\n\t\tif self.__logger is None:\n\t\t\tself.__logger = self.get_logger()\n\t\t\tself._configure_logging(self.__logger)\n\t\treturn self.__logger\n\n\t@classmethod\n\tdef _configure_logging(cls, logger):\n\t\t\"\"\"Hook to override logging configuration\"\"\"\n\t\tlogger.setLevel(cls.log_level)\n\n\t\tif cls.auto_configure_basic_logging:\n\t\t\tconfigure_basic_logging(logger)\n\n\tdef get_logger(self):\n\t\t\"\"\"Hook to override how the logger is instantiated\"\"\"\n\t\treturn logging.getLogger(self.__module__)\n", "sub_path": "docker_conduct/logging.py", "file_name": "logging.py", "file_ext": "py", "file_size_in_byte": 2295, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.Filter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 36, "usage_type": "call"}, {"api_name": "rainbow_logging_handler.RainbowLoggingHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rainbow_logging_handler.RainbowLoggingHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 50, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 51, "usage_type": "attribute"}, {"api_name": "docker_conduct.util.synchronized", "line_number": 26, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "197143379", "text": "import numpy as np\nfrom numpy.random import randn, random, standard_normal\nimport matplotlib.pyplot as plt\nimport logging\nimport uuid\nfrom baselines.constants import *\n\nclass Channel:\n def __init__(self, channel_type, fading=0, rate=None, op_freq=None):\n self.uuid = uuid.uuid4()\n self.channel_type = channel_type\n # self.bw = []\n # self.max_coverage = []\n self.fading = fading\n # self.awgn = awgn\n\n if not rate:\n if channel_type==LTE:\n self.up = 75*MBPS\n self.down = 300*MBPS\n self.op_freq = 2.6*GHZ\n elif channel_type==WIFI1:\n self.up = 135*MBPS\n self.down = 135*MBPS\n self.op_freq = 2.4*GHZ\n elif channel_type==WIFI2:\n self.up = 135*MBPS\n self.down = 135*MBPS\n self.op_freq = 5*GHZ\n elif channel_type==BT:\n self.up = 22*MBPS\n self.down = 22*MBPS\n self.op_freq = 2.4*GHZ\n elif 
channel_type==NFC:\n self.up = 212*KBPS\n self.down = 212*KBPS\n self.op_freq = 13.56*MHZ\n elif channel_type==NFC:\n self.up = 212*KBPS\n self.down = 212*KBPS\n self.op_freq = 13.56*MHZ\n else: # channel_type==WIRED:\n self.up = 0.02*GBPS\n self.down = 0.02*GBPS\n else:\n self.up = rate[0]\n self.down = rate[1]\n self.op_freq = op_freq\n\n def get_uuid(self):\n return self.uuid.hex\n\n def get_channel_type(self):\n return self.channel_type\n\n def get_rate(self, is_up=True, dist=0):\n # noises = 0\n gain = 1\n if is_up:\n mean_rate = self.up\n else:\n mean_rate = self.down\n\n if self.fading and self.channel_type!=WIRED:\n gain *= 1 + standard_normal()*np.sqrt(self.fading)\n # return np.random.rayleigh( np.sqrt(2/np.pi)*mean_rate )\n return mean_rate*gain\n\ndef main():\n import pdb; pdb.set_trace()\n\nif __name__=='__main__':\n main()\n", "sub_path": "MECS_gym/baselines/channels.py", "file_name": "channels.py", "file_ext": "py", "file_size_in_byte": 2173, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "180215726", "text": "import pickle\r\nimport pandas as pd\r\nimport urllib.request\r\nimport json\r\nimport ast\r\nimport streamlit as st\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\n\r\nwith open('framingham_classifier_Logistic_regression_new.pkl', 'rb') as f:\r\n model = pickle.load(f)\r\n\r\n\r\ndef main():\r\n st.title(\" Disease Predictor\")\r\n st.sidebar.header('Patient Details')\r\n age = st.sidebar.number_input(\"Age (years)\", 0, 200, 49)\r\n sysBP = st.sidebar.number_input(\"systolic blood pressure(mmHg)\", 0.0, 500.0, 132.0)\r\n diaBP = st.sidebar.number_input(\"diastolic blood pressure(mmHg)\", 0, 250, 82)\r\n glucose = st.sidebar.number_input(\"glucose level\", 0.0, 1000.0, 81.0)\r\n # diabetes = st.sidebar.number_input('diabetes', 0, 200, 2)\r\n option = st.sidebar.selectbox('Gender', ('Male', 'Female'))\r\n if option == 'Male':\r\n male = 1.0\r\n elif option == 'Female':\r\n male = 0.0\r\n\r\n option2 = st.sidebar.selectbox('Blood Pressure medications', ('Yes', 'No'))\r\n if option2 == 'Yes':\r\n BPMeds = 1.0\r\n elif option2 == 'No':\r\n BPMeds = 0.0\r\n\r\n totChol = st.sidebar.number_input(\"total cholesterol level(mg/dL)\", 0.0, 1000.0, 236.0)\r\n BMI = st.sidebar.number_input(\"BMI(Body Mass Index )\", 0.0, 100.0, 25.0)\r\n option3 = st.sidebar.selectbox('prevalentStroke', ('Yes', 'No'))\r\n if option3 == 'Yes':\r\n prevalentStroke = 1.0\r\n elif option3 == 'No':\r\n prevalentStroke = 0.0\r\n\r\n option4 = st.sidebar.selectbox('prevalentHyp', ('Yes', 'No'))\r\n if option4 == 'Yes':\r\n prevalentHyp = 1.0\r\n elif option4 == 'No':\r\n prevalentHyp = 0.0\r\n\r\n pregnantNo = st.sidebar.number_input(\"pregnant No\", 0, 200, 0)\r\n plasmaGlucoseConc = st.sidebar.number_input(\"Plasma Glucose Concentration\", 0, 500, 120)\r\n tricepsThickness = st.sidebar.number_input(\"Tricep Thickness\", 0, 200, 20)\r\n SerumInsulin = st.sidebar.number_input(\"Serum Insulin\", 0, 20000, 79)\r\n diabPedigreeFunc = st.sidebar.number_input(\"Diabetic Pedigree Function\", 0.001, 100.0, 1.0)\r\n\r\n data1 = {\r\n \"Inputs\": {\r\n \"input1\":\r\n [\r\n {\r\n 'Number of times pregnant': pregnantNo,\r\n 
                        'Plasma glucose concentration a 2 hours in an oral glucose tolerance test': plasmaGlucoseConc,\r\n                        'Diastolic blood pressure (mm Hg)': diaBP,\r\n                        'Triceps skin fold thickness (mm)': tricepsThickness,\r\n                        '2-Hour serum insulin (mu U/ml)': SerumInsulin,\r\n                        'Body mass index (weight in kg/(height in m)^2)': BMI,\r\n                        'Diabetes pedigree function': diabPedigreeFunc,\r\n                        'Age (years)': age,\r\n                        'Class variable (0 or 1)': \"0\",\r\n                    }\r\n                ],\r\n        },\r\n        \"GlobalParameters\": {}\r\n    }\r\n    body = str.encode(json.dumps(data1))\r\n\r\n    url = 'https://ussouthcentral.services.azureml.net/workspaces/13c077d4051e4e1088654297b2bbcb04/services/934466005a2243948e5d6b46d9cdec64/execute?api-version=2.0&format=swagger'\r\n    api_key = 'u4bfO9QM3gPLQ4nbSXiFNXP/h4B3yO0QE1lQy0/GOSqPwgOTFwAyWr4WXEYKj4tfrvZ/mIvRZpH2b5bn9QxHgg=='  # Replace this with the API key for the web service\r\n    headers = {'Content-Type': 'application/json', 'Authorization': ('Bearer ' + api_key)}\r\n\r\n    req = urllib.request.Request(url, body, headers)\r\n\r\n    try:\r\n        response = urllib.request.urlopen(req)\r\n        result = response.read()\r\n        my_json = result.decode('utf8').replace(\"'\", '\"')\r\n        data = json.loads(my_json)\r\n        s = json.dumps(data, indent=4, sort_keys=True)\r\n        FinalData = data[\"Results\"]['output1']\r\n        res = str(FinalData)[1:-1]\r\n        json_data = ast.literal_eval(res)\r\n        FinalOutputAzure = json_data[\"Scored Labels\"]\r\n        NewDiabetesColumn = json_data[\"Scored Labels\"]\r\n\r\n    except urllib.error.HTTPError as error:\r\n        print(\"The request failed with status code: \" + str(error.code))\r\n        # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure\r\n        print(error.info())\r\n        print(json.loads(error.read().decode(\"utf8\", 'ignore')))\r\n\r\n    input_variables = pd.DataFrame(\r\n        [[age, sysBP, diaBP, glucose, NewDiabetesColumn, male, BPMeds, totChol, BMI, prevalentStroke, prevalentHyp]],\r\n        columns=['age', 'sysBP', 'diaBP', 'glucose', 'diabetes', 'male', 'BPMeds', 'totChol', 'BMI',\r\n                 'prevalentStroke', 'prevalentHyp'],\r\n        dtype=float)\r\n    result2 = \"\"\r\n\r\n    azureresult = int(FinalOutputAzure)\r\n\r\n    # if st.sidebar.button(\"Predict\"):\r\n    result2 = model.predict(input_variables)[0]\r\n    if result2 == 1:\r\n        result2 = 'Positive'\r\n    elif result2 == 0:\r\n        result2 = 'Negative'\r\n\r\n    if azureresult == 1:\r\n        azureresult = 'Positive'\r\n    elif azureresult == 0:\r\n        azureresult = 'Negative'\r\n\r\n    st.subheader(\"Predicted result for Coronary Heart Diseases in next 10 years:\")\r\n    st.success(result2)\r\n\r\n    st.subheader(\"Predicted result for diabetes from AzureML\")\r\n    st.success(azureresult)\r\n\r\n    heart_raw = pd.read_csv('Preprocessed_framingham.csv')\r\n    heart_pro = heart_raw.drop(columns=['TenYearCHD'])\r\n    df = pd.DataFrame(heart_pro)\r\n\r\n    normal_up = [295, 142.5, 394, 696, 56.8, 199, 99, 846, 2.42]\r\n    normal_down = [83.5, 48, 40, 107, 15.54, 0, 0, 0, 0.078]\r\n    current = [sysBP, diaBP, glucose, totChol, BMI, plasmaGlucoseConc, tricepsThickness,\r\n               SerumInsulin, diabPedigreeFunc]\r\n\r\n    names = ['sysBP', 'diaBP', 'glucose', 'totChol', 'BMI', 'plasmaGlucoseConc',\r\n             'tricepsThickness',\r\n             'SerumInsulin', 'diabPedigreeFunc']\r\n\r\n    li = [normal_up, normal_down, current]\r\n    chart_data = pd.DataFrame({'Upper Limit': normal_up,\r\n                               'Lower Limit': normal_down,\r\n                               'Current Position': current})\r\n\r\n    st.subheader('')\r\n\r\n    fig = go.Figure(data=[\r\n        go.Bar(name='Upper Limit', x=names, y=normal_up),\r\n        go.Bar(name='Lower Limit', x=names, y=normal_down),\r\n        
go.Bar(name='Current Position', x=names, y=current)])\r\n    fig.update_layout(title={\r\n        'text': \"Range of Safety \",\r\n        'y': 0.9,\r\n        'x': 0.4,\r\n        'xanchor': 'center',\r\n        'yanchor': 'top'}, font=dict(\r\n        family=\"Courier New, monospace\",\r\n        size=13,\r\n        color=\"black\"\r\n    ))\r\n    st.plotly_chart(fig)\r\n\r\n    st.title('Data Distribution')\r\n\r\n    df1 = df.head(400)\r\n    fig = px.scatter(df1, x=\"totChol\", y=\"age\",\r\n                     size=\"heartRate\", color=\"glucose\",\r\n                     hover_name=\"age\", log_x=True, size_max=30)\r\n    st.plotly_chart(fig)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6826, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pickle.load", "line_number": 11, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.sidebar.header", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 16, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 17, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 18, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 19, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 20, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 22, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 28, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 34, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 36, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 42, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 48, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 48, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 49, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 50, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 51, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 52, 
"usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 52, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 79, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 79, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 79, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 82, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 82, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 82, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 85, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 89, "usage_type": "call"}, {"api_name": "urllib.request.error", "line_number": 93, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 93, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 99, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 121, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 123, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 140, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 144, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 146, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 146, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 147, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 147, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 148, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 148, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 149, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 149, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 160, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 162, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 165, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 165, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "543223032", "text": "\"\"\"magazine_3_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.get_index.as_view(), name='index'),\n path('author/', views.get_author, name='author'),\n path('article/', views.get_single, name='article'),\n path('topic/', views.get_category, name='topic'),\n path('login', views.get_login, name='login'),\n path('logout', views.get_logout, name='logout'),\n path('create', views.get_create, name='create'),\n path('profile', views.get_profile, name='profile'),\n path('update/', views.get_update, name='edit'),\n path('del/', views.get_delete, name='del'),\n path('register', views.get_register, name='register'),\n path('topics', views.get_topics, name='category'),\n path('create/topic', views.create_topics, name='create_topic'),\n path('delete/topic/', views.delete_topics, name='del_topic'),\n path('edit/topic/', views.update_topics, name='edit_topic'),\n]\n", "sub_path": "magazine/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "433931448", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom mock import Mock, patch\nfrom datetime import datetime\nimport pytz\n\nfrom django.test import TestCase\n\nimport pytest\n\nimport requests\nfrom requests import ConnectionError, Timeout, RequestException\n\nfrom feeds.models import Feed\nfrom feeds.tasks import update_feed, get_feed_info\n\n\nclass TestUpdateFeed(TestCase):\n value = {\n 'url': 'https://blog.cloudflare.com/rss/',\n 'title': 'CloudFlare',\n 'description': 'CloudFlare',\n 'feed_updated': datetime(2014, 10, 3, 6, 0, 0, tzinfo=pytz.utc)\n }\n\n def test_update_feed_successful(self):\n val = dict(self.value)\n feed = Feed.objects.create(**val)\n val.update({'title': 'CloudFlare Blog'}) # update the title\n update_feed(val['url'], val)\n assert Feed.objects.get(url=val['url']).title == 'CloudFlare Blog'\n\n def test_update_feed_not_exist(self):\n update_feed(self.value['url'], self.value)\n\n def test_update_feed_invalid_form(self):\n val = dict(self.value)\n feed = Feed.objects.create(**val)\n val.update({'title': 'abcdefghijklmnopqrstuvwxyz' * 10})\n update_feed(val['url'], val)\n assert Feed.objects.get(url=val['url']).title == 
'CloudFlare'\n\n    def test_update_feed_form_clean(self):\n        val = dict(self.value)\n        val.update({'description': 'abcdefghijklmnopqrstuvwxyz' * 5})\n        feed = Feed.objects.create(**val)\n        val.update({'description': 'abcdefghijklmnopqrstuvwxyz' * 20})\n        update_feed(val['url'], val)\n        assert len(Feed.objects.get(url=val['url']).description) == 200\n\n\nclass TestAddNewFeed(TestCase):\n    value = {\n        'url': 'https://blog.cloudflare.com/rss/',\n        'title': 'CloudFlare',\n        'description': 'CloudFlare',\n        'feed_updated': str(datetime(2014, 10, 2, 18, 17, 28))\n    }\n\n    def test_get_feed_info_successful(self):\n        resp = Mock()\n        resp.status_code = 200\n        resp.text = \"\"\"\n<rss version=\"2.0\">\n<channel>\n<title><![CDATA[ CloudFlare ]]></title>\n<description><![CDATA[ CloudFlare ]]></description>\n<link>http://blog.cloudflare.com/</link>\n<generator>Ghost 0.6</generator>\n<lastBuildDate>Fri, 3 Oct 2014 01:17:28 GMT</lastBuildDate>\n<ttl>60</ttl>\n<item>\n<title><![CDATA[ Of Phishing Attacks and WordPress 0days ]]></title>\n<description><![CDATA[ Proxying around 5% of the Internet’s requests gives us an interesting vantage point from which to observe malicious behavior. It also make us a target. Aside from the many, varied denial of service attacks that break against our defenses we also see huge number of phishing campaigns. In this ]]></description>\n<link>http://blog.cloudflare.com/of-phishing-attacks-and-wordpress-0days/</link>\n<guid isPermaLink=\"false\">685f03c1-34a2-4e55-8b19-877c0211615a</guid>\n<pubDate>Fri, 3 Oct 2014 01:17:28 GMT</pubDate>\n</item>\n</channel>\n</rss>
\n\"\"\"\n        with patch.object(requests, 'get', return_value=resp):\n            data, errors = get_feed_info(self.value['url'])\n            assert data == self.value\n            assert errors == []\n\n    def test_get_feed_info_not_supported_err(self):\n        resp = Mock()\n        resp.status_code = 200\n        resp.text = '<html><head><title>test page</title></head><body>main content</body></html>'\n        with patch.object(requests, 'get', return_value=resp):\n            data, errors = get_feed_info(self.value['url'])\n            assert data == {}\n            assert errors == ['Failed to get this new feed: Unsupported feed format.']\n\n    def test_get_feed_info_404_err(self):\n        resp = Mock()\n        resp.status_code = 404\n        with patch.object(requests, 'get', return_value=resp):\n            data, errors = get_feed_info(self.value['url'])\n            assert data == {}\n            assert errors == ['Failed to get this new feed.']\n\n    def test_get_feed_info_timeout_err(self):\n        with patch.object(requests, 'get', side_effect=Timeout):\n            data, errors = get_feed_info(self.value['url'])\n            assert data == {}\n            assert errors == ['Failed to add this new feed: Timeout. Please try again.']\n\n    def test_get_feed_info_connection_err(self):\n        with patch.object(requests, 'get', side_effect=ConnectionError):\n            data, errors = get_feed_info(self.value['url'])\n            assert data == {}\n            assert errors == ['Failed to get this new feed.']\n\n\n", "sub_path": "tests/test_feeds/test_tasks.py", "file_name": "test_tasks.py", "file_ext": "py", "file_size_in_byte": 4804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.test.TestCase", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 25, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed.objects.create", "line_number": 30, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 30, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 32, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 33, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 36, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.create", "line_number": 40, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 40, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 42, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 43, "usage_type": "name"}, {"api_name": "feeds.models.Feed.objects.create", "line_number": 48, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 48, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 50, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.get", "line_number": 51, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 51, 
"usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 63, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 99, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 99, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 100, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 105, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 108, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 108, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 109, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 114, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 116, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 116, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 117, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 122, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 122, "usage_type": "name"}, {"api_name": "requests.Timeout", "line_number": 122, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 123, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 128, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 128, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 128, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "630406564", "text": "# -*- coding: utf-8 -*-\n# pylint: disable=import-error, no-name-in-module, no-member\n\"\"\"\nModule for testing a tfrecord loading module.\n\n\"\"\"\n\nimport os\nimport sys\nfrom kmlm.base.common import ExitCode, Logger, ParseOption\nfrom kmlm.base.tfrecord_gen import TFRecordGenerator\nfrom kmlm.base.utils import KmlmUtil as Util\n\ndef main():\n logger = Logger(name=\"TFRecord gen test for Keras\", level=Logger.INFO).logger\n config = ParseOption(sys.argv, logger).args\n\n # Setting paths\n text_list = Util.get_file_path(config.paths_data_path,\n config.paths_text_corpus)\n tfrecord_path = \"%s.tfrecord\"%text_list\n\n # Loading vocabularies\n vocab_path = Util.get_file_path(config.paths_data_path, config.paths_vocab)\n if not os.path.isfile(vocab_path):\n logger.critical(\"%s does not exist.\", vocab_path)\n sys.exit(ExitCode.INVALID_FILE_PATH)\n vocab, _ = Util.load_vocab(vocab_path, config=config)\n\n import keras.backend as k\n batch, init = TFRecordGenerator.load_tfrecord(tfrecord_path,\n config.train_batch)\n k.get_session().run(init)\n for i, value in \\\n enumerate(TFRecordGenerator.text_tfrecord_generator(batch,\n config,\n vocab)):\n if i >= 2:\n break\n logger.info(value)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "kmlm/base/tfrecord_gen_keras_test.py", "file_name": "tfrecord_gen_keras_test.py", "file_ext": "py", "file_size_in_byte": 1432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "kmlm.base.common.Logger", "line_number": 15, "usage_type": "call"}, {"api_name": "kmlm.base.common.Logger.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "kmlm.base.common.ParseOption", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 16, 
"usage_type": "attribute"}, {"api_name": "kmlm.base.utils.KmlmUtil.get_file_path", "line_number": 19, "usage_type": "call"}, {"api_name": "kmlm.base.utils.KmlmUtil", "line_number": 19, "usage_type": "name"}, {"api_name": "kmlm.base.utils.KmlmUtil.get_file_path", "line_number": 24, "usage_type": "call"}, {"api_name": "kmlm.base.utils.KmlmUtil", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 27, "usage_type": "call"}, {"api_name": "kmlm.base.common.ExitCode.INVALID_FILE_PATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "kmlm.base.common.ExitCode", "line_number": 27, "usage_type": "name"}, {"api_name": "kmlm.base.utils.KmlmUtil.load_vocab", "line_number": 28, "usage_type": "call"}, {"api_name": "kmlm.base.utils.KmlmUtil", "line_number": 28, "usage_type": "name"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator.load_tfrecord", "line_number": 31, "usage_type": "call"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.backend.get_session", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 33, "usage_type": "name"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator.text_tfrecord_generator", "line_number": 35, "usage_type": "call"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "516290640", "text": "import os\nimport json\nfrom google.cloud import bigquery\nfrom google.cloud import error_reporting\nfrom google.api_core import retry\nfrom google.cloud import firestore\nfrom xml.etree import ElementTree\nimport traceback\nimport logging\nimport requests\nimport datetime\nimport pytz\nimport pandas as pd\nimport dateutil\n\nPROJECT_ID = os.getenv(\"GCP_PROJECT\")\nBQ_DATASET = 'vta_vs'\nBQ_TABLE = 'weather_forecast'\nBQ = bigquery.Client()\nDB = firestore.Client()\nclient = error_reporting.Client()\n\n\ndef weather(request):\n \"\"\"\n Responds to a request from Cloud Scheduler. 
When invoked, gets the weather forecast\n    for the (constant) list of lat/long combinations and stores the result in a BigQuery table.\n    :param request:\n    :return: None\n    \"\"\"\n\n    # get the forecast\n    lat_lon_str_escaped = os.getenv(\"LAT_LON_STR\")\n    forecast_url = (\n        \"\"\"https://graphical.weather.gov/xml/sample_products/browser_interface/ndfdXMLclient.php?\"\"\"\n        \"\"\"whichClient=NDFDgenLatLonList\"\"\"\n        \"\"\"&listLatLon={}\"\"\"\n        \"\"\"&product=time-series\"\"\"\n        \"\"\"&Unit=m\"\"\"\n        \"\"\"&temp=temp\"\"\"\n        \"\"\"&pop12=pop12\"\"\"\n        \"\"\"&Submit=Submit\"\"\").format(lat_lon_str_escaped)\n    response = requests.get(forecast_url)\n    if response.status_code == 200:\n        logging.info(\"Downloaded forecast.\")\n        response_xml = ElementTree.fromstring(response.content)\n        forecast_time = response_xml.find('head').find('product').find('creation-date').text\n    else:\n        logging.error(\"Non-success return code from NDFD request\")\n        raise RuntimeError('Non-success return code from NDFD request')\n\n    # see if we have already seen this record\n    logging.info(\"Checking for duplicates.\")\n    db_ref = DB.document(u'weather_forecasts/%s' % forecast_time)\n    if _was_already_ingested(db_ref):\n        logging.warning('Duplication attempt streaming file \\'%s\\'' % db_ref.id)\n        return\n    else:\n        try:\n            logging.info(\"Inserting into BigQuery.\")\n            _insert_into_bigquery(response_xml, forecast_time)\n            _handle_success(db_ref)\n        except Exception:\n            logging.error(\"Could not insert into BigQuery\")\n            _handle_error(db_ref)\n\n\ndef _was_already_ingested(db_ref):\n    status = db_ref.get()\n    return status.exists and status.to_dict()['success']\n\n\ndef _now():\n    return datetime.datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')\n\n\ndef _handle_success(db_ref):\n    message = 'Forecast \\'%s\\' streamed into BigQuery' % db_ref.id\n    doc = {\n        u'success': True,\n        u'when': _now()\n    }\n    db_ref.set(doc)\n    logging.info(message)\n\n\ndef _handle_error(db_ref):\n    message = 'Error streaming forecast \\'%s\\'. 
Cause: %s' % (db_ref.id, traceback.format_exc())\n doc = {\n u'success': False,\n u'error_message': message,\n u'when': _now()\n }\n db_ref.set(doc)\n logging.error(message)\n\n\ndef _insert_into_bigquery(weather_xml, forecast_time):\n\n tree = weather_xml.find('data')\n time_layouts_df = pd.DataFrame()\n logging.info(\"Processing time\")\n for time_layout in tree.findall('time-layout'):\n time_layouts = []\n time_layout_key = time_layout.find('layout-key').text\n for index, start_time in enumerate(time_layout.findall('start-valid-time')):\n time_layouts.append({'time_layout': time_layout_key,\n 'time_index': index,\n 'time': dateutil.parser.parse(start_time.text)})\n time_layouts_df = pd.concat([time_layouts_df, pd.DataFrame(time_layouts)])\n\n logging.info(\"Processing parameters\")\n parameters_df = pd.DataFrame()\n for parameter in tree.findall('parameters'):\n point_name = parameter.attrib['applicable-location']\n for observation in parameter:\n observations = []\n units = observation.attrib['units']\n time_layout = observation.attrib['time-layout']\n observation_name = \"{} ({})\".format(observation.find('name').text, units)\n for time_index, value in enumerate(observation.findall('value')):\n observations.append({\"point_name\": point_name,\n \"time_layout\": time_layout,\n \"time_index\": time_index,\n observation_name: value.text\n })\n observation_df = pd.DataFrame(observations)\n observation_df = observation_df.merge(time_layouts_df)\n observation_df.drop([\"time_layout\", \"time_index\"], axis=1, inplace=True)\n observation_df = observation_df.set_index(\"time\").resample(\"H\").first().ffill()\n parameters_df = pd.concat([parameters_df, observation_df])\n parameters_df = parameters_df.groupby(['point_name', 'time']).last().reset_index().dropna()\n parameters_df['time'] = parameters_df.time.apply(lambda x: x.astimezone('UTC'))\n parameters_df['forecast_time'] = forecast_time\n parameters_df['temperature_c'] = parameters_df['Temperature (Celsius)']\n parameters_df['pop12'] = parameters_df['12 Hourly Probability of Precipitation (percent)']\n\n logging.info(\"Converting rows to json\")\n rows = json.loads(parameters_df[[\n 'point_name',\n 'time',\n 'forecast_time',\n 'temperature_c',\n 'pop12'\n ]].to_json(orient='records'))\n row_ids = [forecast_time]\n table = BQ.dataset(BQ_DATASET).table(BQ_TABLE)\n logging.info(\"Starting insert into BigQuery\")\n errors = BQ.insert_rows_json(table,\n json_rows=rows,\n row_ids=row_ids,\n retry=retry.Retry(deadline=30))\n if errors:\n logging.error(errors)\n raise BigQueryError(errors)\n\n\nclass BigQueryError(Exception):\n '''Exception raised whenever a BigQuery error happened'''\n\n def __init__(self, errors):\n super().__init__(self._format(errors))\n self.errors = errors\n\n def _format(self, errors):\n err = []\n for error in errors:\n err.extend(error['errors'])\n return json.dumps(err)\n", "sub_path": "functions/weather/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 19, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 19, "usage_type": "name"}, {"api_name": "google.cloud.firestore.Client", "line_number": 20, "usage_type": "call"}, {"api_name": "google.cloud.firestore", "line_number": 20, "usage_type": "name"}, {"api_name": 
"google.cloud.error_reporting.Client", "line_number": 21, "usage_type": "call"}, {"api_name": "google.cloud.error_reporting", "line_number": 21, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 46, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.utcnow", "line_number": 74, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 74, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 84, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 102, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 109, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 138, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 139, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 148, "usage_type": "call"}, {"api_name": "google.api_core.retry.Retry", "line_number": 152, "usage_type": "call"}, {"api_name": "google.api_core.retry", "line_number": 152, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 154, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "611938727", "text": "import os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\n\ndef main():\n matplotlib.rcParams['text.usetex'] = True\n sns.set(font_scale=4, style=\"whitegrid\")\n base_dir = \"results/resultsRHtest7_12_18_c/rh_add5_repeat10\"\n num_req_field = \"Number of Concurrent Requests\"\n times_field = \"Response Time (s)\"\n title = \"Resource Registration\"\n base_filename = \"rh\"\n file_format = \"eps\"\n show_outliers = False\n ymax = 5\n ytick = 1\n reqs = []\n times = []\n\n files = os.listdir(base_dir)\n\n for f_name in files:\n try:\n num_req = int(f_name.split('_')[1])\n except IndexError:\n num_req = -1\n\n if 0 <= num_req <= 100:\n if num_req % 5 != 0:\n continue\n with open(base_dir + \"/\" + f_name, 'rb') as f:\n for line in f.readlines()[:num_req]:\n reqs.append(num_req)\n times.append(int(line.split()[2]) / 1000.0)\n\n data_frame = pd.DataFrame({num_req_field: reqs, 
times_field: times})\n response_times_boxplot = pd.melt(data_frame, id_vars=num_req_field, value_name=times_field)\n\n font = {\n 'family': 'Liberation Sans',\n 'weight': 'normal'\n }\n\n plt.rc('font', **font)\n plt.yticks(np.arange(0, ymax + 1, ytick))\n # plt.xlabel(\"x label\")\n # plt.ylabel(\"y label\")\n\n plt.title(title)\n plt.ylim(ymax=ymax)\n # plt.legend(['True Positive Ratio'], loc='lower right')\n # plt.legend(loc='upper right', prop={'size': 40})\n sns.boxplot(x=num_req_field, y=times_field, data=response_times_boxplot, showfliers=show_outliers, notch=True)\n # plt.grid(axis='y')\n # plt.grid(axis='x')\n fig = plt.gcf()\n # fig.tight_layout(pad=0.7 * 22 / font_size)\n # fig.tight_layout()\n fig.set_size_inches(20, 14)\n # plt.show()\n plt.savefig(file_format + \"/\" + base_filename + \".\" + file_format)\n #\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "rh_boxplots.py", "file_name": "rh_boxplots.py", "file_ext": "py", "file_size_in_byte": 2001, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "seaborn.set", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.melt", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "197824205", "text": "import psycopg2\nimport psycopg2.extras\nimport logging\nfrom pathlib import Path\nfrom configparser import ConfigParser\nimport logging\nimport os.path\nimport xml.etree.ElementTree as ET\n\n\n\n\ndef load_config(config_file=\"database.ini\"):\n \"\"\"\n Load the configuration file\n :param config_file:\n :return:\n \"\"\"\n db = {}\n parser = ConfigParser()\n parser.read( config_file )\n params = parser.items(\"POSTGRES\")\n for param in params:\n db[param[0]] = param[1]\n return db\n\n\ndef get_connection_local_pg(params):\n \"\"\"\n Return the Postgres db Connection\n\n :param db:\n :param config:\n :return:\n \"\"\"\n #conn_string_no_passwd = params\n\n #logging.info(conn_string_no_passwd)\n conn = psycopg2.connect(**params)\n\n cur = conn.cursor()\n logging.info('PostgreSQL database version:')\n cur.execute('SELECT version()')\n db_version = cur.fetchone()\n logging.info(db_version)\n # print(cursor)\n # conn.cursor will return a cursor object, 
you can use this cursor to perform queries\n\n return conn\n\n\ndef load_annotations(conn,pat_note_id, name, gold_path):\n cur = conn.cursor()\n file_path = gold_path + \"/\" + name.replace(\"txt\", \"xml\")\n if os.path.isfile(file_path):\n f = open(file_path)\n logging.info(f\"Annotation file found for {file_path}\")\n tree = ET.parse(file_path)\n root = tree.getroot()\n for tags in root.findall('TAGS'):\n logging.info(f\"TAGS {tags}\" )\n for tag in tags.iter():\n if len( tag.keys() ) > 0 :\n logging.info(f\"TAG { tag.tag } : { tag.attrib }\" )\n insert_sql = 'INSERT INTO pat_annotations ( pat_note_id, category, type, pos_id, start, stop, text) VALUES ( %s,%s, %s, %s, %s, %s, %s) RETURNING id'\n keys = tag.attrib.keys();\n logging.info(f\" KEYS for tag : {keys}\")\n # os.sys.exit(1)\n cur.execute(insert_sql, (pat_note_id, tag.attrib[\"TYPE\"], tag.tag,tag.attrib['id'], tag.attrib['start'],tag.attrib['end'] ,tag.attrib['text']))\n conn.commit()\n else:\n logging.error(f\"Annotation file not found for {file_path}\")\n os.sys.exit(1)\n\ndef import_data(conn, path, gold_path):\n cur = conn.cursor()\n create_sql_pat_notes = 'CREATE TABLE IF NOT EXISTS \"i2b2_data\".\"public\".pat_notes ( id SERIAL NOT NULL, file_name VARCHAR, note VARCHAR, PRIMARY KEY (id) )'\n create_anno = 'CREATE TABLE IF NOT EXISTS \"i2b2_data\".\"public\".pat_annotations ( id SERIAL NOT NULL, pat_note_id INTEGER, category CHARACTER VARYING, type CHARACTER VARYING, pos_id CHARACTER VARYING, START NUMERIC, STOP NUMERIC, TEXT CHARACTER VARYING, CONSTRAINT patannotations_fk1 FOREIGN KEY (pat_note_id) REFERENCES \"i2b2_data\".\"public\".\"pat_notes\" (\"id\") )'\n res = cur.execute(create_sql_pat_notes)\n res = cur.execute(create_anno)\n conn.commit()\n\n # Truncate table\n truncate_sql = 'TRUNCATE TABLE \"i2b2_data\".\"public\".pat_notes CASCADE'\n truncate_sql_ann = 'TRUNCATE TABLE \"i2b2_data\".\"public\".pat_annotations CASCADE '\n res = cur.execute(truncate_sql_ann)\n res = cur.execute(truncate_sql)\n # Read files and import\n with os.scandir(path) as entries:\n for entry in entries:\n if entry.is_file():\n logging.info(f\"Importing file {entry.name}\")\n with open(entry, 'r') as f:\n data = f.read()\n insert_sql = 'insert into \"i2b2_data\".\"public\".pat_notes(file_name, note) values (%s, %s) RETURNING id'\n cur.execute(insert_sql, (entry.name,data,))\n row_id = cur.fetchone()[0]\n logging.info(f\"Inserted row {row_id} \")\n load_annotations(conn,row_id , entry.name, gold_path )\n conn.commit()\n\n cur.close()\n conn.commit()\n return None", "sub_path": "db_connection.py", "file_name": "db_connection.py", "file_ext": "py", "file_size_in_byte": 3926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 55, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 57, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 58, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 58, "usage_type": "name"}, {"api_name": 
"logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 67, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.sys.exit", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.sys", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 73, "usage_type": "name"}, {"api_name": "os.path.scandir", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "291362660", "text": "import logging\nimport os\nimport shutil\nimport subprocess\n\nfrom middlewared.job import Pipes\nfrom middlewared.service import CallError, item_method, job, Service\nfrom middlewared.schema import accepts, Dict, Int, Str\nfrom middlewared.utils import osc, run\nfrom middlewared.utils.shell import join_commandline\n\n\nlogger = logging.getLogger(__name__)\n\n# platform specific imports\nif osc.IS_FREEBSD:\n import sysctl\n\n\nclass PoolService(Service):\n\n @item_method\n @accepts(\n Int('id'),\n Dict(\n 'options',\n Dict(\n 'geli',\n Str('passphrase', private=True, default=''),\n ),\n )\n )\n @job(lock='pool_expand')\n async def expand(self, job, id, options):\n \"\"\"\n Expand pool to fit all available disk space.\n \"\"\"\n pool = await self.middleware.call('pool.get_instance', id)\n if osc.IS_LINUX:\n if options.get('passphrase'):\n raise CallError('Passphrase should not be supplied for this platform.')\n # FIXME: We have issues in ZoL where when pool is created with partition uuids, we are unable\n # to expand pool where all pool related options error out saying I/O error\n # https://github.com/zfsonlinux/zfs/issues/9830\n raise CallError('Expand is not supported on this platform yet because of underlying ZFS issues.')\n else:\n if pool['encrypt']:\n if not pool['is_decrypted']:\n raise CallError('You can only expand decrypted pool')\n\n for error in (\n await self.middleware.call('pool.pool_lock_pre_check', pool, options['geli']['passphrase'])\n ).errors:\n raise CallError(error.errmsg)\n\n all_partitions = {p['name']: p for p in await self.middleware.call('disk.list_all_partitions')}\n\n try:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 16\n geli_resize = []\n try:\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK':\n logger.debug('Not expanding vdev of type %r', vdev['type'])\n continue\n\n if vdev['status'] != 'ONLINE':\n logger.debug('Not expanding vdev that is %r', vdev['status'])\n continue\n\n part_data = all_partitions.get(vdev['device'])\n if not part_data:\n logger.debug('Unable to find partition data for %s', vdev['device'])\n\n partition_number = part_data['partition_number']\n if not partition_number:\n logger.debug('Could not parse partition number from %r', vdev['device'])\n continue\n\n assert part_data['disk'] == vdev['disk']\n\n if osc.IS_LINUX:\n await run(\n 'sgdisk', '-d', str(partition_number), '-n', f'{partition_number}:0:0',\n '-c', '2:', '-u', f'{partition_number}:{part_data[\"partition_uuid\"]}',\n '-t', f'{partition_number}:BF01', part_data['path']\n )\n await run('partprobe', os.path.join('/dev', part_data['disk']))\n else:\n await run('camcontrol', 'reprobe', vdev['disk'])\n await run('gpart', 'recover', 
vdev['disk'])\n await run('gpart', 'resize', '-i', str(partition_number), vdev['disk'])\n\n if osc.IS_FREEBSD and pool['encrypt']:\n geli_resize_cmd = (\n 'geli', 'resize', '-s', str(part_data['size']), vdev['device']\n )\n rollback_cmd = (\n 'gpart', 'resize', '-i', str(partition_number), '-s', str(part_data['size']), vdev['disk']\n )\n\n logger.warning('It will be obligatory to notify GELI that the provider has been resized: %r',\n join_commandline(geli_resize_cmd))\n logger.warning('Or to resize provider back: %r',\n join_commandline(rollback_cmd))\n geli_resize.append((geli_resize_cmd, rollback_cmd))\n finally:\n if osc.IS_FREEBSD and geli_resize:\n await self.__geli_resize(pool, geli_resize, options)\n finally:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 0\n\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK' or vdev['status'] != 'ONLINE':\n continue\n\n await self.middleware.call('zfs.pool.online', pool['name'], vdev['guid'], True)\n\n async def __geli_resize(self, pool, geli_resize, options):\n failed_rollback = []\n\n lock_job = await self.middleware.call('pool.lock', pool['id'], options['geli']['passphrase'])\n await lock_job.wait()\n if lock_job.error:\n logger.warning('Error locking pool: %s', lock_job.error)\n\n for geli_resize_cmd, rollback_cmd in geli_resize:\n if not await self.__run_rollback_cmd(rollback_cmd):\n failed_rollback.append(rollback_cmd)\n\n if failed_rollback:\n raise CallError(\n 'Locking your encrypted pool failed and rolling back changes failed too. '\n f'You\\'ll need to run the following commands manually:\\n%s' % '\\n'.join(\n map(join_commandline, failed_rollback)\n )\n )\n else:\n for geli_resize_cmd, rollback_cmd in geli_resize:\n try:\n await run(*geli_resize_cmd, encoding='utf-8', errors='ignore')\n except subprocess.CalledProcessError as geli_resize_error:\n if geli_resize_error.stderr.strip() == 'geli: Size hasn\\'t changed.':\n logger.info(\n '%s: %s', join_commandline(geli_resize_cmd), geli_resize_error.stderr.strip()\n )\n else:\n logger.error(\n '%r failed: %s. Resizing partition back', join_commandline(geli_resize_cmd),\n geli_resize_error.stderr.strip()\n )\n if not await self.__run_rollback_cmd(rollback_cmd):\n failed_rollback.append(rollback_cmd)\n\n if failed_rollback:\n raise CallError(\n 'Resizing partitions of your encrypted pool failed and rolling back '\n 'changes failed too. You\\'ll need to run the following commands manually:\\n%s' %\n '\\n'.join(map(join_commandline, failed_rollback))\n )\n\n if options['geli']['passphrase']:\n unlock_job = await self.middleware.call(\n 'pool.unlock', pool['id'], {'passphrase': options['geli']['passphrase']}\n )\n else:\n unlock_job = await self.middleware.call(\n 'pool.unlock', pool['id'], {'recoverykey': True},\n pipes=Pipes(input=self.middleware.pipe())\n )\n\n def copy():\n with open(pool['encryptkey_path'], 'rb') as f:\n shutil.copyfileobj(f, unlock_job.pipes.input.w)\n\n try:\n await self.middleware.run_in_thread(copy)\n finally:\n await self.middleware.run_in_thread(unlock_job.pipes.input.w.close)\n\n await unlock_job.wait()\n if unlock_job.error:\n raise CallError(unlock_job.error)\n\n @staticmethod\n async def __run_rollback_cmd(rollback_cmd):\n try:\n await run(*rollback_cmd, encoding='utf-8', errors='ignore')\n except subprocess.CalledProcessError as rollback_error:\n logger.critical(\n '%r failed: %s. 
To restore your pool functionality you will have to run this command manually.',\n join_commandline(rollback_cmd),\n rollback_error.stderr.strip()\n )\n return False\n else:\n return True\n", "sub_path": "src/middlewared/middlewared/plugins/pool_/expand.py", "file_name": "expand.py", "file_ext": "py", "file_size_in_byte": 8588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 16, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 16, "usage_type": "name"}, {"api_name": "middlewared.service.Service", "line_number": 20, "usage_type": "name"}, {"api_name": "middlewared.utils.osc.IS_LINUX", "line_number": 39, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 39, "usage_type": "name"}, {"api_name": "middlewared.service.CallError", "line_number": 41, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 45, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 49, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 54, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 59, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 59, "usage_type": "name"}, {"api_name": "sysctl.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_LINUX", "line_number": 83, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 83, "usage_type": "name"}, {"api_name": "middlewared.utils.run", "line_number": 84, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "middlewared.utils.run", "line_number": 91, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 92, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 93, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 95, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 95, "usage_type": "name"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 104, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 106, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 109, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 109, "usage_type": "name"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 112, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 112, "usage_type": "name"}, {"api_name": "sysctl.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "middlewared.service.item_method", "line_number": 22, "usage_type": "name"}, {"api_name": "middlewared.schema.accepts", "line_number": 23, "usage_type": "call"}, {"api_name": "middlewared.schema.Int", "line_number": 24, "usage_type": "call"}, {"api_name": "middlewared.schema.Dict", "line_number": 25, "usage_type": "call"}, {"api_name": "middlewared.schema.Dict", "line_number": 27, "usage_type": "call"}, {"api_name": "middlewared.schema.Str", "line_number": 29, 
"usage_type": "call"}, {"api_name": "middlewared.service.job", "line_number": 33, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 134, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 137, "usage_type": "argument"}, {"api_name": "middlewared.utils.run", "line_number": 143, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 144, "usage_type": "attribute"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 147, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 151, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 158, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 161, "usage_type": "argument"}, {"api_name": "middlewared.job.Pipes", "line_number": 171, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 176, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 185, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 190, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 191, "usage_type": "attribute"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "522808363", "text": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import Blueprint, redirect, render_template, request, url_for\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport json\nimport datetime\n\n\ncrud = Blueprint('crud', __name__)\n\ncred = credentials.Certificate('eu1-kubernetes-169431998c5e.json')\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n\n# [START list]\n@crud.route(\"/\")\ndef list():\n print(\"Show books\")\n users_ref = db.collection(u'users')\n docs = users_ref.get()\n\n for doc in docs:\n print(u'{} => {}'.format(doc.id, doc.to_dict()))\n return render_template(\n \"list.html\")\n# [END list]\n\n\n\n# [START add]\n'''\ncurl -v -XPOST http://localhost:8080/books/saldo --header \"Content-Type: application/json\" --data '{\"airport\":\"arlanda\",\"saldo\":\"5\"}'\n\n'''\n\n@crud.route('/saldo', methods=['POST'])\ndef add():\n if request.method == 'POST':\n print('Adding saldo to db')\n content = request.get_json(silent=True)\n airport=\"non\"\n for key in content:\n if key == \"airport\":\n airport=content[key]\n print(content[key])\n content['timestamp']=datetime.datetime.now()\n doc_ref = db.collection(u'saldo').document(airport+\"_\"+str(datetime.datetime.now()))\n doc_ref.set(content\n )\n\n return render_template(\"form.html\")\n# [END add]\n# [START add]\n'''\n\ncurl -v -XPOST http://localhost:8080/books/action --header \"Content-Type: application/json\" --data 
'{\"airport\":\"arlanda\",\"run\":\"name\",\"data\":{\"temp\":\"34\",\"brushlenght\":\"20\",\"power\":\"300\"}}'\n\n'''\n\n\n\n@crud.route('/action', methods=['POST'])\ndef action():\n if request.method == 'POST':\n print('Action log')\n content = request.get_json(silent=True)\n airport=\"non\"\n for key in content:\n if key == \"airport\":\n airport=content[key]\n print(content[key])\n content['timestamp']=datetime.datetime.now()\n doc_ref = db.collection(u'action').document(airport+\"_\"+str(datetime.datetime.now()))\n doc_ref.set(content\n )\n\n return render_template(\"form.html\")\n\n@crud.route('//edit', methods=['GET', 'POST'])\ndef edit(id):\n book = get_model().read(id)\n\n if request.method == 'POST':\n data = request.form.to_dict(flat=True)\n\n book = get_model().update(data, id)\n\n return redirect(url_for('.view', id=book['id']))\n\n return render_template(\"form.html\", action=\"Edit\", book=book)\n\n\n\n", "sub_path": "api/backend/crud.py", "file_name": "crud.py", "file_ext": "py", "file_size_in_byte": 3059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Blueprint", "line_number": 23, "usage_type": "call"}, {"api_name": "firebase_admin.credentials.Certificate", "line_number": 25, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 25, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 26, "usage_type": "call"}, {"api_name": "firebase_admin.firestore.client", "line_number": 27, "usage_type": "call"}, {"api_name": "firebase_admin.firestore", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": 
"name"}, {"api_name": "flask.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "59388924", "text": "import pandas as pd\nimport pylab as pl\nimport numpy as np\nimport scipy.optimize as opt\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\nimport matplotlib.pyplot as plt\n\n#Read in Cancer data files\ncsv_path = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/cell_samples.csv'\ncell_df = pd.read_csv(csv_path)\n\n#plot Clump vs UnifSize with the dependent variable being 2(benign) or 4(maligant)\nax = cell_df[cell_df['Class'] == 4][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='DarkBlue', label='malignant');\ncell_df[cell_df['Class'] == 2][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='Yellow', label='benign', ax=ax);\nplt.savefig('SVM.png')\n\n#----------preprocessing----------------------------------------------\n\n#list out all atrributes\n\n\n#Drop nonnumerical rows in BareNuc attribute then convert remaining to int\ncell_df = cell_df[pd.to_numeric(cell_df['BareNuc'], errors='coerce').notnull()]\ncell_df['BareNuc'] = cell_df['BareNuc'].astype('int')\n\n\n#create another df just of independent variables, hence the double square brakets\nfeature_df = cell_df[['Clump', 'UnifSize', 'UnifShape', 'MargAdh', 'SingEpiSize', 'BareNuc', 'BlandChrom', 'NormNucl', 'Mit']]\nX = np.asarray(feature_df) #convert to nparray\nprint(X[0:5])\n\n#nparray of dependent variable\ncell_df['Class'] = cell_df['Class'].astype('int')\ny = np.asarray(cell_df['Class'])\nprint(y[0:5])\n\n#Train, test\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\nprint ('Train set:', X_train.shape, y_train.shape)\nprint ('Test set:', X_test.shape, y_test.shape)\n\n\n#------------------Modeling----------------------------------------------------\n\nfrom sklearn import svm\n\n#Use default equation to create our seperator\nclf = svm.SVC(kernel='rbf')\nclf.fit(X_train, y_train)\n\n#predict new values\nyhat = clf.predict(X_test)\nprint(yhat[0:5])\n\n\n#------------------Evaluation--------------------------------------------------\n\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport itertools\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('CF.png')\n\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat, 
labels=[2,4])\nnp.set_printoptions(precision=2)\n\nprint (classification_report(y_test, yhat))\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion matrix')\n\n\n\n#Plotting the F1-score\nfrom sklearn.metrics import f1_score\nf1_score(y_test, yhat, average='weighted')\n\n\n#Using jaccard to measure accuracy\nfrom sklearn.metrics import jaccard_similarity_score\njaccard_similarity_score(y_test, yhat)\n", "sub_path": "SVM.py", "file_name": "SVM.py", "file_ext": "py", "file_size_in_byte": 3835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "pandas.to_numeric", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 72, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 101, 
"usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.metrics.jaccard_similarity_score", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "582934993", "text": "\nfrom tkinter import *\nfrom tkinter import ttk, colorchooser, filedialog, messagebox\nimport tkinter.messagebox\nimport PIL.ImageGrab as ImageGrab\n\nclass main:\n\n def __init__(self, master):\n self.master = master\n self.penwidth = 5\n self.color_bg = 'white'\n self.color_fg = 'black'\n self.drawwidgets()\n self.setup()\n self.c.bind('', self.paint) # drwaing the line\n self.c.bind('', self.reset)\n\n def changeW(self, e):\n self.penwidth = e\n\n def clear(self):\n self.c.delete(ALL)\n\n def paint(self, e):\n paint_color = self.color_bg if self.eraser_on else self.color_fg\n if self.old_x and self.old_y:\n self.c.create_line(self.old_x, self.old_y, e.x, e.y,\n width=self.penwidth, fill=paint_color,\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n self.old_x = e.x\n self.old_y = e.y\n\n def erase(self):\n self.activate_button(self.eraser, eraser_mode=True)\n\n def penf(self):\n self.activate_button(self.pen)\n\n def reset(self, e): # reseting or cleaning the canvas\n self.old_x = None\n self.old_y = None\n\n def change_fg(self): # changing the pen color\n self.color_fg = colorchooser.askcolor(color=self.color_fg)[1]\n\n def change_bg(self): # changing the background color canvas\n self.color_bg = colorchooser.askcolor(color=self.color_bg)[1]\n self.c['bg'] = self.color_bg\n self.clear()\n\n def activate_button(self, some_button, eraser_mode=False):\n self.active_button.config(relief=RAISED)\n some_button.config(relief=SUNKEN)\n self.active_button = some_button\n self.eraser_on = eraser_mode\n\n def msg(self):\n tkinter.messagebox.showinfo(\n 'About Paint Application', 'This is a paint aplication which provides you with features such as changing background and brush colors. 
It also provides you with a slider to change pen width.')\n\n def about(self):\n tkinter.messagebox.showinfo(\n \"Paint Application Developer\", \"Kushal Nitin Lahoti MIS :- 111803179\")\n\n def save_it(self):\n\n try:\n filename = filedialog.asksaveasfilename(defaultextension='.jpg')\n ImageGrab.grab().save(filename)\n messagebox.showinfo('Paint says', 'image is saved as ' + str(filename))\n\n except:\n messagebox.showerror('Paint says', 'unable to save image, \\n something went wrong')\n\n def save_it_destroy(self):\n\n try:\n filename = filedialog.asksaveasfilename(defaultextension='.jpg')\n ImageGrab.grab().save(filename)\n messagebox.showinfo('Paint says', 'image is saved as ' + str(filename))\n self.master.destroy()\n\n except:\n messagebox.showerror('Paint says', 'unable to save image, \\n something went wrong')\n\n def drawwidgets(self):\n self.controls = Frame(self.master, height=1000, width=140)\n self.label = Label(self.controls, text='Width',font=('Times 15'), fg='red')\n self.label.place(x=10, y=280)\n self.slider = ttk.Scale(self.controls, from_=5,to=100, command=self.changeW, orient=VERTICAL)\n self.slider.set(self.penwidth)\n self.slider.place(x=80, y=250)\n self.controls.pack(side=LEFT)\n self.pen = Button(self.controls, text='Pen',font=('Times 12'), command=self.penf)\n self.pen.place(x=15, y=200)\n self.eraser = Button(self.controls, text='Eraser',font=('Times 12'), command=self.erase)\n self.eraser.place(x=75, y=200)\n self.c = Canvas(self.master, width=500, height=400, bg=self.color_bg)\n self.c.pack(fill=BOTH, expand=True)\n\n menu = Menu(self.master)\n self.master.config(menu=menu)\n\n filemenu = Menu(menu, tearoff = 0)\n menu.add_cascade(label='File', menu=filemenu)\n filemenu.add_command(label='Save', command=self.save_it)\n filemenu.add_command(label='Save and Exit', command=self.save_it_destroy)\n\n color = Menu(menu, tearoff=0)\n menu.add_cascade(label='Colors', menu=color)\n color.add_command(label='Brush Color', command=self.change_fg)\n color.add_command(label='Background Color', command=self.change_bg)\n\n option = Menu(menu, tearoff=0)\n menu.add_cascade(label='Options', menu=option)\n option.add_command(label='Clear Canvas', command=self.clear)\n option.add_command(label='Exit', command=self.master.destroy)\n\n help_option = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Help\", menu=help_option)\n #help_option.add_command(label=\"Features\", command=self.features_msg)\n help_option.add_command(label=\"About Paint Application\", command=self.msg)\n help_option.add_command(label=\"Developers\", command=self.about)\n\n def setup(self):\n\n self.old_x = None\n self.old_y = None\n self.eraser_on = False\n self.active_button = self.pen\n\n\nif __name__ == '__main__':\n root = Tk()\n main(root)\n root.geometry('900x600')\n root.title('Paint Application')\n root.mainloop()\n\n\n", "sub_path": "Assignment 6_PaintApp/Paint_app.py", "file_name": "Paint_app.py", "file_ext": "py", "file_size_in_byte": 5242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tkinter.colorchooser.askcolor", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.colorchooser", "line_number": 45, "usage_type": "name"}, {"api_name": "tkinter.colorchooser.askcolor", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.colorchooser", "line_number": 48, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.messagebox",
"line_number": 59, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 70, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 71, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 71, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 74, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 74, "usage_type": "name"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 79, "usage_type": "name"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 80, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 80, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 81, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 85, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 85, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scale", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "533320268", "text": "import netifaces\nimport socket\nfrom smtplib import SMTP_SSL as SMTP\nfrom email.mime.text import MIMEText\n\n\nintInfo = list();\nfrom_addr = \"XXXX@gmail.com\"\nto_addrs = \"XXXX@gmail.com\"\ncontent = \"\"\n\nintList = netifaces.interfaces()\n\nfor intf in intList:\n try:\n addr = netifaces.ifaddresses(intf)\n content += (intf + \"\\n\")\n content += (\" IP address:\\t\"+addr[netifaces.AF_INET][0]['addr']+\"\\n\")\n content += (\"Subnet Mask:\\t\"+addr[netifaces.AF_INET][0]['netmask'] + \"\\n\\n\")\n except KeyError:\n content += (\"No IP address found on \"+intf+\"\\n\\n\")\n \n\nmsg = MIMEText(content, 'plain')\nmsg['Subject'] = \"--Network Information from \"+socket.gethostname()+\"--\"\nmsg['From'] = from_addr\n\nser = SMTP(\"smtp.gmail.com:465\")\nser.ehlo()\nser.login(\"XXXX\", \"XXXX\")\nser.sendmail(from_addr, to_addrs, msg.as_string())\nser.quit()", "sub_path": "mailing/mailing.py", "file_name": "mailing.py", "file_ext": "py", "file_size_in_byte": 851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "netifaces.interfaces", "line_number": 12, "usage_type": "call"}, {"api_name": "netifaces.ifaddresses", "line_number": 16, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 18, "usage_type": "attribute"}, {"api_name": "netifaces.AF_INET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "email.mime.text.MIMEText", "line_number": 24, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 25, "usage_type": "call"}, {"api_name": "smtplib.SMTP_SSL", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "452409097", "text": "import copy\nimport logging\nimport os.path\nimport jinja2\nimport yaml\nimport jsonpatch\nimport json\nfrom collections import OrderedDict\nimport kpm.manifest as manifest\nfrom 
kpm.template_filters import jinja_filters\nfrom kpm.kub_base import KubBase\nfrom kpm.kubernetes import get_endpoint\nfrom kpm.utils import convert_utf8\n\n\n# __all__ = ['Kub']\n\nlogger = logging.getLogger(__name__)\n\n\n_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n\njinja_env = jinja2.Environment()\njinja_env.filters.update(jinja_filters())\n\n\nclass Kub(KubBase):\n def __init__(self, *args, **kwargs):\n super(Kub, self).__init__(*args, **kwargs)\n self.manifest = manifest.Manifest(self.package)\n\n @property\n def kubClass(self):\n return Kub\n\n def _create_namespaces(self, resources):\n # @TODO create namespaces for all manifests\n if self.namespace:\n ns = self.create_namespace(self.namespace)\n resources[ns['file']] = ns\n return resources\n\n def _append_patch(self, resources={}):\n index = 0\n\n for resource in self.manifest.resources:\n index += 1\n resources[resource['file']] = resource\n resource[\"order\"] = index\n if 'protected' not in resource:\n resource[\"protected\"] = False\n if 'patch' not in resource:\n resource['patch'] = []\n\n if self._deploy_resources is not None:\n for resource in self._deploy_resources:\n if 'patch' in resource and len(resource['patch']) > 0:\n resources[resource['file']][\"patch\"] += resource['patch']\n\n return resources\n\n def _generate_shards(self, resources):\n if not len(self.shards):\n return resources\n sharded = {}\n to_remove = []\n index = 0\n for _, resource in resources.iteritems():\n index += 1\n resource['order'] = index\n if 'sharded' in resource and resource['sharded'] is True:\n for shard in self.shards:\n shard_vars = shard.get('variables', {})\n shard_vars.update({\"name\": shard['name']})\n\n r = {\"file\": \"%s-%s.yaml\" % (os.path.splitext(resource['file'])[0].replace(\"/\", \"_\"),\n shard['name']),\n \"order\": index,\n \"protected\": False,\n \"template\": resource['file'],\n \"variables\": shard_vars,\n \"patch\": resource['patch'] + shard.get('patch', []),\n \"name\": \"%s-%s\" % (resource['name'], shard['name']),\n \"type\": resource['type']}\n sharded[r['file']] = r\n index += 1\n to_remove.append(resource['file'])\n map(resources.pop, to_remove)\n resources.update(sharded)\n return resources\n\n def _default_patch(self, resources):\n for _, resource in resources.iteritems():\n patch = [\n {\"op\": \"replace\",\n \"path\": \"/metadata/name\",\n \"value\": resource['name']},\n ]\n if 'patch' not in resource:\n resource['patch'] = []\n resource['patch'] += patch\n return resources\n\n def _resolve_jinja(self, resources, from_value=False):\n for _, resource in resources.iteritems():\n if 'template' in resource:\n tpl_file = resource['template']\n else:\n tpl_file = resource['file']\n if from_value or resource.get('generated', False) is True:\n val = yaml.safe_dump(convert_utf8(resource['value']), width=float(\"inf\"))\n else:\n val = self.package.files[os.path.join('templates', tpl_file)]\n template = jinja_env.from_string(val)\n variables = copy.deepcopy(self.variables)\n if 'variables' in resource:\n variables.update(resource['variables'])\n if len(self.shards):\n variables['kpmshards'] = self.shards\n t = template.render(variables)\n resource['value'] = yaml.safe_load(t)\n return resources\n\n def _apply_patches(self, resources):\n for _, resource in resources.iteritems():\n if self.namespace:\n if 'namespace' in resource['value']['metadata']:\n op = 'replace'\n else:\n op = 'add'\n resource['patch'].append({\"op\": op, \"path\": \"/metadata/namespace\", \"value\": self.namespace})\n\n if 
len(resource['patch']):\n patch = jsonpatch.JsonPatch(resource['patch'])\n result = patch.apply(resource['value'])\n resource['value'] = result\n return resources\n\n def resources(self):\n if self._resources is None:\n self._resources = OrderedDict()\n resources = self._resources\n resources = self._create_namespaces(resources)\n resources = self._append_patch(resources)\n resources = self._generate_shards(resources)\n resources = self._default_patch(resources)\n resources = self._resolve_jinja(resources)\n resources = self._apply_patches(resources)\n resources = self._resolve_jinja(resources, True)\n return self._resources\n\n def prepare_resources(self, dest=\"/tmp\", index=0):\n for _, resource in self.resources().iteritems():\n index += 1\n path = os.path.join(dest, \"%02d_%s_%s\" % (index,\n self.version,\n resource['file'].replace(\"/\", \"_\")))\n f = open(path, 'w')\n f.write(yaml.safe_dump(convert_utf8(resource['value'])))\n resource['filepath'] = f.name\n f.close()\n return index\n\n def build(self):\n result = []\n for kub in self.dependencies:\n kubresources = OrderedDict([(\"package\", kub.name),\n (\"version\", kub.version),\n (\"namespace\", kub.namespace),\n (\"resources\", [])])\n for _, resource in kub.resources().iteritems():\n resource = self._annotate_resource(kub, resource)\n\n kubresources['resources'].\\\n append(OrderedDict({\"file\": resource['file'],\n \"hash\": resource['value']['metadata']['annotations'].get('kpm.hash', None),\n \"protected\": resource['protected'],\n \"name\": resource['name'],\n \"kind\": resource['value']['kind'].lower(),\n \"endpoint\": get_endpoint(\n resource['value']['kind'].lower()).\n format(namespace=self.namespace),\n \"body\": json.dumps(resource['value'])}))\n\n result.append(kubresources)\n return {\"deploy\": result,\n \"package\": {\"name\": self.name,\n \"version\": self.version}}\n", "sub_path": "kpm/kub.py", "file_name": "kub.py", "file_ext": "py", "file_size_in_byte": 7402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "yaml.resolver", "line_number": 21, "usage_type": "attribute"}, {"api_name": "jinja2.Environment", "line_number": 24, "usage_type": "call"}, {"api_name": "kpm.template_filters.jinja_filters", "line_number": 25, "usage_type": "call"}, {"api_name": "kpm.kub_base.KubBase", "line_number": 28, "usage_type": "name"}, {"api_name": "kpm.manifest.Manifest", "line_number": 31, "usage_type": "call"}, {"api_name": "kpm.manifest", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 77, "usage_type": "name"}, {"api_name": "yaml.safe_dump", "line_number": 112, "usage_type": "call"}, {"api_name": "kpm.utils.convert_utf8", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 114, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 116, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 122, "usage_type": "call"}, {"api_name": "jsonpatch.JsonPatch", "line_number": 135, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 142, "usage_type": "call"}, 
{"api_name": "os.path.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 156, "usage_type": "name"}, {"api_name": "yaml.safe_dump", "line_number": 160, "usage_type": "call"}, {"api_name": "kpm.utils.convert_utf8", "line_number": 160, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 168, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 176, "usage_type": "call"}, {"api_name": "kpm.kubernetes.get_endpoint", "line_number": 181, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "437875581", "text": "from time import time\nfrom typing import Optional\n\nfrom httpx import Client\n\n\nclass HTTPClient:\n def __init__(\n self,\n base_url: str,\n default_headers: Optional[dict] = None,\n default_params: Optional[dict] = None,\n ):\n self.base_url = base_url\n self.default_headers = default_headers or {}\n self.default_params = default_params or {}\n\n self.http_client = Client(\n base_url=self.base_url, headers=default_headers, params=self.default_params\n )\n\n def get(self, url: str, params: dict, headers: dict = None):\n custom_headers = headers or {}\n\n if not params.get(\"_rticket\"):\n params[\"_rticket\"] = int(round(time() * 1000))\n\n response = self.http_client.get(url=url, params=params, headers=custom_headers)\n\n return response\n\n def post(self, url: str, data: dict, headers: dict = None):\n custom_headers = headers or {}\n\n rticket = int(round(time() * 1000))\n\n response = self.http_client.post(\n url=url, params={\"_rticket\": rticket}, data=data, headers=custom_headers\n )\n\n return response\n", "sub_path": "tiktok_bot/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.Optional", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 12, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "413247640", "text": "\"\"\"\n python3 readgraph.py [ digraph-file ]\n\nTakes a csv (comma separated values) text file containing the vertices\nand edges of a street digraph and converts it into a digraph instance.\n\nIf the optional argument digraph-file is supplied, reads that, otherwise\ntakes input from stdin\n\"\"\"\n# import sys\n\n# # throw away executable name before processing command line arguments\n# argv = sys.argv[1:]\n\n# # if filename is supplied, use that, otherwise use stdin\n# if argv:\n# digraph_file_name = argv.pop(0)\n# digraph_file = open(digraph_file_name, 'r')\n# else:\n# digraph_file = sys.stdin\n\n# For testing, just use a simple representation of set of vertices, set of\n# edges as ordered pairs, and dctionaries that map\n# vertex to (lat,long)\n# edge to street name\n\nimport logging\nfrom digraph import Digraph\n\n\ndef readgraph(digraph_file_name):\n # create logger\n readgraph_logger = logging.getLogger('MappingServer.readgraph')\n\n readgraph_logger.info(\"Opening graphfile:\" + str(digraph_file_name))\n digraph_file = open(digraph_file_name, 'r')\n readgraph_logger.info(\"Open successful.\")\n\n V = set()\n E = set()\n V_coord = {}\n E_name = {}\n\n 
G = Digraph()\n\n readgraph_logger.info(\"Parsing file...\")\n # process each line in the file\n for line in digraph_file:\n\n # strip all trailing whitespace\n line = line.rstrip()\n\n fields = line.split(\",\")\n type = fields[0]\n\n if type == 'V':\n # got a vertex record\n (id, lat, long) = fields[1:]\n\n # vertex id's should be ints\n id = int(id)\n\n # lat and long are floats\n lat = float(lat)\n long = float(long)\n\n V.add(id)\n V_coord[id] = (lat, long)\n\n elif type == 'E':\n # got an edge record\n (start, stop, name) = fields[1:]\n\n # vertices are ints\n start = int(start)\n stop = int(stop)\n e = (start, stop)\n\n # get rid of leading and trailing quote \" chars around name\n name = name.strip('\"')\n\n # consistency check, we don't want auto adding of vertices when\n # adding an edge.\n if start not in V or stop not in V:\n readgraph_logger.error(\"Edge {} has an endpoint that is not a vertex\".format(e))\n raise Exception(\"Edge {} has an endpoint that is not a vertex\".format(e))\n\n G.add_edge(e)\n E_name[e] = name\n else:\n # weird input\n readgraph_logger.error(\"Error: weird line |{}|\".format(line))\n raise Exception(\"Error: weird line |{}|\".format(line))\n\n readgraph_logger.info(\"Parsing finished.\")\n readgraph_logger.debug(\"Graph has \" + str(G.num_vertices()) + \" vertices and \" + str(G.num_edges()) + \" edges\")\n\n V_Rev = {}\n\n for key in V_coord:\n V_Rev[key] = (int(V_coord[key][0] * 100000), int(V_coord[key][1] * 100000))\n\n V_coord_rev = dict([(v, k) for (k, v) in V_Rev.items()])\n\n names = (V_coord, E_name, V_coord_rev)\n\n return (G, names)\n", "sub_path": "readgraph.py", "file_name": "readgraph.py", "file_ext": "py", "file_size_in_byte": 3090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "digraph.Digraph", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "169804472", "text": "import sys\nfrom datetime import datetime\nimport time\nimport re\n\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import Factory, ClientFactory\nfrom twisted.web.client import getPage\n\nfrom project_config import API_KEY\n\n# Server info dictionary containing port numbers and all adjacent servers for\n# each server in the herd.\nserver_info = {\n\t'Alford' \t: { 'port' : 12000, 'adjacent_servers' : ['Parker', 'Powell'] \t\t\t\t},\n\t'Bolden' \t: { 'port' : 12001, 'adjacent_servers' : ['Parker', 'Powell'] \t\t\t\t},\n\t'Hamilton' \t: { 'port' : 12002, 'adjacent_servers' : ['Parker'] \t\t\t\t\t\t},\n\t'Parker' \t: { 'port' : 12003, 'adjacent_servers' : ['Alford', 'Bolden', 'Hamilton'] \t},\n\t'Powell' \t: { 'port' : 12004, 'adjacent_servers' : ['Alford', 'Bolden'] \t\t\t\t}\n}\n\n# Server protocol for the proxy herd\nclass ProxyHerdProtocol(LineReceiver):\n\tdef __init__(self, factory):\n\t\tself.factory = factory\n\t\tself.name = None\n\t\tself.connectionID = -1\n\t\tself.GooglePlacesURL = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'\n\n\n\tdef connectionMade(self):\n\t\tself.connectionID = self.factory.numConnectionsReceived\n\t\tself.factory.numConnectionsReceived += 1\n\t\tself.logMessage('CONNECTION #' + str(self.connectionID) + ' made. Time: ' + str(datetime.now()))\n\n\n\tdef connectionLost(self, reason):\n\t\tself.logMessage('CONNECTION #' + str(self.connectionID) + ' lost. 
Time: ' + str(datetime.now()))\n\n\n\tdef lineReceived(self, msg):\n\t\tself.logMessage('RECEIVED message: ' + msg)\n\t\t# Splits the message by whitespace\n\t\tmsg_list = msg.split()\n\t\tif (msg_list == []):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\t# Determine the type of command\n\t\tcmd_name = msg_list[0]\n\t\tif (cmd_name == 'IAMAT' and len(msg_list) == 4):\n\t\t\tself.processIAMATcommand(msg)\n\t\telif (cmd_name == 'WHATSAT' and len(msg_list) == 4):\n\t\t\tself.processWHATSATcommand(msg)\n\t\telif (cmd_name == 'AT' and len(msg_list) == 8):\n\t\t\tself.processATcommand(msg)\n\t\telif (cmd_name == 'INIT_QUERY' and len(msg_list) == 3):\n\t\t\tself.processINIT_QUERYcommand(msg)\n\t\telse:\n\t\t\tself.processInvalidCommand(msg)\n\n\n\tdef processInvalidCommand(self, msg):\n\t\tInvldResponse = '? ' + str(msg)\n\t\tself.sendLine(InvldResponse)\n\t\tself.logMessage('SENT invalid command notification: ' + InvldResponse)\n\n\n\t# Command received from adjacent server who has just come online and wants to\n\t# obtain existing user location information\n\tdef processINIT_QUERYcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\tif (msg_list[1] != 'FROM' or\n\t\t\tmsg_list[2] not in server_info[self.factory.serverID]['adjacent_servers']):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\tsender_serverID = msg_list[2]\n\t\tfor ATmessage in self.factory.users.values():\n\t\t\treactor.connectTCP('localhost', server_info[sender_serverID]['port'],\n\t\t\t\t\t\t\t\tLocationPropagationFactory(self.factory.serverID, ATmessage))\n\t\t\tself.logMessage('SENT location information to server ' + sender_serverID +\n\t\t\t\t\t\t\t' following INIT_QUERY: ' + ATmessage)\n\n\n\tdef processIAMATcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\t# Match for the latitude and longitude\n\t\tlatlon_match = re.match('^(\\+|-)\\d+.\\d+(\\+|-)\\d+.\\d+$', msg_list[2])\n\t\t# Match for the time the client thinks it sent the message\n\t\ttime_match = re.match('^\\d+.\\d+$', msg_list[3])\n\t\tif (latlon_match == None or time_match == None):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\t# Calculate the time difference between the server's idea of when it\n\t\t# received the message and the client's timestamp\n\t\ttime_diff = time.time() - float(time_match.group())\n\t\ttime_diff_sign = ''\n\t\tif time_diff >= 0:\n\t\t\ttime_diff_sign = '+'\n\t\tIAMATdata = ' '.join(msg_list[1:])\n\t\t# Formulate the AT response message\n\t\tATresponse = (\t'AT ' + self.factory.serverID + ' ' +\n\t\t\t\t\t\ttime_diff_sign + str(time_diff) + ' ' + IAMATdata\t)\n\t\t# Set the protocols name to the client ID\n\t\tself.name = msg_list[1]\n\t\t# Set an entry in the users dictionary with the client ID as the key\n\t\t# and the At response message as the value\n\t\tself.factory.users[self.name] = ATresponse\n\t\tself.sendLine(ATresponse)\n\t\tself.logMessage('SENT AT response to user ' + self.name + ' following IAMAT command: ' + ATresponse)\n\t\t# Propagate the AT response to adjacent servers\n\t\tself.propagateLocationUpdate(ATresponse)\n\n\t\t\t\n\tdef processWHATSATcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\t# Match on the radius and the upper bound of information to provide\n\t\t# (not actually used)\n\t\tif (re.match('^\\d+$', msg_list[2]) == None or\n\t\t\tre.match('^\\d+$', msg_list[3]) == None):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\tother_client_name = msg_list[1]\n\t\t# Check that the other client name provided is actually one of this\n\t\t# server's users\n\t\tif 
self.factory.users.has_key(other_client_name) == False:\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\t# Get the appropriate AT response to send back to the client\n\t\tATresponse = self.factory.users[other_client_name]\n\t\tself.sendLine(ATresponse)\n\t\tself.logMessage('SENT AT response to user following WHATSAT command: ' + ATresponse)\n\n\t\t# Match for the latitude and longitude\n\t\tlatlon_match = re.match('^((\\+|-)\\d+.\\d+)((\\+|-)\\d+.\\d+)$', ATresponse.split()[4])\n\t\t# Formulate the Google Places query URL\n\t\tpageURL = self.GooglePlacesURL + 'location=' + latlon_match.group(1) + ',' + latlon_match.group(3) + '&' + 'radius=' + msg_list[2] + '&' + 'sensor=false&' + 'key=' + API_KEY\n\t\td = getPage(pageURL)\n\t\td.addCallbacks(self.sendGooglePlacesResponse, self.sendGooglePlacesErrorNotification)\n\n\tdef sendGooglePlacesResponse(self, response):\n\t\t# Replace every sequence of two or more newlines with a single newline\n\t\tresponse = re.sub('\\n\\n+', '\\n', response)\n\t\t# Replace any trailing newlines with a single newline\n\t\tresponse = re.sub('\\n*$', '\\n', response)\n\t\tself.sendLine(response)\n\t\tself.logMessage('SENT Google Places response message:\\n' + response)\n\n\t# Send an error notification if the Google Places query failed\n\tdef sendGooglePlacesErrorNotification(self, response):\n\t\terror_msg = 'Error: Could not retrieve Google Places information for the given request.'\n\t\tself.sendLine(error_msg)\n\t\tself.logMessage('SENT Google Places error notification: ' + error_msg)\n\n\n\tdef processATcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\t# Check that the AT message is valid\n\t\t# - valid server ID\n\t\t# - valid time difference\n\t\t# - valid latitude and longitude\n\t\t# - valid user timestamp\n\t\t# - FROM valid server ID\n\t\tif (not server_info.has_key(msg_list[1]) or\n\t\t\tre.match('^(\\+|-)\\d+.\\d+$', msg_list[2]) == None or\n\t\t\tre.match('^(\\+|-)\\d+.\\d+(\\+|-)\\d+.\\d+$', msg_list[4]) == None or\n\t\t\tre.match('^\\d+.\\d+$', msg_list[5]) == None or\n\t\t\tmsg_list[6] != 'FROM' or\n\t\t\tmsg_list[7] not in server_info[self.factory.serverID]['adjacent_servers']):\n\t\t\tself.sendLine('Error: Invalid AT message sent to server ' + self.factory.serverID + '.')\n\t\t\tself.logMessage('ERROR: Invalid AT message received from server.')\n\t\t\treturn\n\n\t\tsender_serverID = msg_list[7]\n\t\tclientID = msg_list[3]\n\t\tmsg = ' '.join(msg_list[:6])\n\t\t# If the information for the client ID present in the AT message is\n\t\t# already present in this server's users dictionary, don't update\n\t\t# or propagate\n\t\tif self.factory.users.has_key(clientID) and self.factory.users[clientID] == msg:\n\t\t\tself.logMessage('IGNORED the propagated location update from ' + sender_serverID + '.')\n\t\t\treturn\n\n\t\t# Update user information\n\t\tself.factory.users[clientID] = msg\n\t\t# Propagate AT message\n\t\tself.propagateLocationUpdate(msg)\n\n\t# Propagates a location update to adjacent servers\n\tdef propagateLocationUpdate(self, ATmessage):\n\t\tadjacent_servers = server_info[self.factory.serverID]['adjacent_servers']\n\t\tfor s in adjacent_servers:\n\t\t\treactor.connectTCP('localhost', server_info[s]['port'], LocationPropagationFactory(self.factory.serverID, ATmessage))\n\t\t\tself.logMessage('PROPAGATED location update to server ' + s + ': ' + ATmessage)\n\n\tdef logMessage(self, msg):\n\t\tlogfile = open(self.factory.logfilename, 'a')\n\t\tlogfile.write(msg + '\\n\\n')\n\t\tlogfile.close()\n\n\n# Server factory\nclass
ProxyHerdFactory(Factory):\n\tdef __init__(self, serverID):\n\t\tself.users = {}\n\t\tself.serverID = serverID\n\t\tself.numConnectionsReceived = 0\n\t\tself.logfilename = serverID + '-' + str(datetime.now()) + '.log'\n\t\tprint('Initializing server...\\nCreating logfile \\\"' + self.logfilename + '\\\".')\n\t\t# Create logfile\n\t\tlogfile = open(self.logfilename, 'w')\n\t\t# Query adjacent servers for existing user location information\n\t\tprint('Querying adjacent servers for existing user location information...\\n')\n\t\tadjacent_servers = server_info[self.serverID]['adjacent_servers']\n\t\tfor s in adjacent_servers:\n\t\t\treactor.connectTCP('localhost', server_info[s]['port'],\n\t\t\t\t\t\t\t\tLocationPropagationFactory(self.serverID, 'INIT_QUERY'))\n\t\t\tlogfile.write(\t'SENT initial user location information query to server ' +\n\t\t\t\t\t\t\ts + ': ' + 'INIT_QUERY FROM ' + self.serverID + '\\n\\n')\n\n\t\tlogfile.close()\t\t\t\n\n\n\tdef buildProtocol(self, addr):\n\t\treturn ProxyHerdProtocol(self)\n\n\n# Client protocol for the proxy herd for propagating an update or initially\n# querying an adjacent server for user location information upon coming online\nclass LocationPropagationProtocol(LineReceiver):\n\tdef __init__(self, factory):\n\t\tself.factory = factory\n\t\n\tdef connectionMade(self):\n\t\tself.sendLine(self.factory.ATmessage + ' FROM ' + self.factory.sender_serverID)\n\t\tself.transport.loseConnection()\n\n\n# Client factory for propagating location information, both for sending updates\n# and for querying adjacent servers for existing user location information\n# upon coming online.\n# Needs to inherit from \"ClientFactory\" in order to use with \"connectTCP\"\n# function.\nclass LocationPropagationFactory(ClientFactory):\n\tdef __init__(self, sender_serverID, ATmessage):\n\t\t# Server ID of the server who initiated the connection\n\t\tself.sender_serverID = sender_serverID\n\t\tself.ATmessage = ATmessage\n\n\tdef startedConnecting(self, connector):\n\t\treturn\n\n\tdef buildProtocol(self, addr):\n\t\treturn LocationPropagationProtocol(self)\n\n\tdef clientConnectionLost(self, connector, reason):\n\t\treturn\n\n\tdef clientConnectionFailed(self, connector, reason):\n\t\treturn\n\n\ndef main():\n\tif len(sys.argv) != 2:\n\t\tprint('Usage: python proxyherd.py serverID')\n\t\texit()\n\n\tserverID = str(sys.argv[1])\n\n\tif not server_info.has_key(serverID):\n\t\tprint('Error: Invalid serverID')\n\t\texit()\n\t\n\treactor.listenTCP(server_info[serverID]['port'], ProxyHerdFactory(serverID))\n\treactor.run()\n\nif __name__ == '__main__':\n\tmain()\n\n", "sub_path": "proxyherd.py", "file_name": "proxyherd.py", "file_ext": "py", "file_size_in_byte": 10419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "twisted.protocols.basic.LineReceiver", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 81, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 81, "usage_type": "name"}, {"api_name": "re.match", "line_number": 90, "usage_type": "call"}, {"api_name": "re.match", "line_number": 92, "usage_type": "call"}, 
{"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "re.match", "line_number": 122, "usage_type": "call"}, {"api_name": "re.match", "line_number": 123, "usage_type": "call"}, {"api_name": "re.match", "line_number": 140, "usage_type": "call"}, {"api_name": "project_config.API_KEY", "line_number": 142, "usage_type": "name"}, {"api_name": "twisted.web.client.getPage", "line_number": 143, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 148, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 150, "usage_type": "call"}, {"api_name": "re.match", "line_number": 170, "usage_type": "call"}, {"api_name": "re.match", "line_number": 171, "usage_type": "call"}, {"api_name": "re.match", "line_number": 172, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 198, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 198, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.Factory", "line_number": 208, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 221, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 221, "usage_type": "name"}, {"api_name": "twisted.protocols.basic.LineReceiver", "line_number": 235, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.ClientFactory", "line_number": 249, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 269, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 273, "usage_type": "attribute"}, {"api_name": "twisted.internet.reactor.listenTCP", "line_number": 279, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 279, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 280, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 280, "usage_type": "name"}]} +{"seq_id": "454879132", "text": "import logging\n\nfrom suds.client import Client, WebFault\n\nfrom rest_framework import generics, status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom django.conf import settings\n\nfrom bonus_cards.models import BonusCard\nfrom bonus_cards.serializers import (\n BonusCardBalanceSerializer, BonusCardTransactionsSerializer,\n BonusCardGetUuidSerializer,\n)\n\nLOGGER = logging.getLogger(__name__)\n\nif settings.DEBUG:\n import logging\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('suds.client').setLevel(logging.DEBUG)\n logging.getLogger('suds.transport').setLevel(logging.DEBUG)\n logging.getLogger('suds.xsd.schema').setLevel(logging.DEBUG)\n logging.getLogger('suds.wsdl').setLevel(logging.DEBUG)\n\n\n@api_view(('GET',))\ndef api_root(request):\n return Response({\n 'get_uuid': reverse(\n 'bonus_cards:get_uuid', kwargs={\n 'bonus_program_uuid': 'BONUS_PROGRAM_UUID',\n 'card_number': 'CARD_NUMBER'\n }\n ),\n 'balance': reverse(\n 'bonus_cards:balance', kwargs={'uuid': 'CARD_UUID'}\n ),\n 'transactions': reverse(\n 'bonus_cards:transactions', kwargs={'uuid': 'CARD_UUID'}\n ),\n })\n\n\nclass BonusCardBaseView(generics.RetrieveAPIView):\n def get_wsdl_service(self, wsdl_client):\n raise NotImplementedError\n\n def get_serialized_data(self, wsdl_data):\n raise NotImplementedError\n\n def get_object(self):\n 
try:\n            wsdl_client = Client(settings.ONE_C_WSDL,\n                                 username=settings.ONE_C_WSDL_USER,\n                                 password=settings.ONE_C_WSDL_PASSWORD)\n\n            wsdl_response = self.get_wsdl_service(wsdl_client)\n\n        except (WebFault, Exception) as e:\n            LOGGER.error(e)\n            wsdl_response = None\n\n        return wsdl_response\n\n    def retrieve(self, request, *args, **kwargs):\n        wsdl_obj = self.get_object()\n        if wsdl_obj:\n            if u'Данные' in wsdl_obj:\n                serialized_data = self.get_serialized_data(wsdl_obj[u'Данные'])\n                # cache.set(request.path, serialized_data, 60)\n                return Response(serialized_data)\n\n            elif u'_Сообщение' in wsdl_obj:\n                message = {'message': wsdl_obj[u'_Сообщение']}\n                return Response(message, status.HTTP_404_NOT_FOUND)\n\n        else:\n            message = {'message': '1C communication error'}\n            return Response(message, status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass BonusCardGetUuidView(BonusCardBaseView):\n    serializer_class = BonusCardGetUuidSerializer\n\n    def get_wsdl_service(self, wsdl_client):\n        return wsdl_client.service.BonusCardGetID(\n            self.kwargs.get('bonus_program_uuid'),\n            self.kwargs.get('card_number'),\n        )\n\n    def get_serialized_data(self, wsdl_data):\n        bonus_card = BonusCard(uuid=wsdl_data[u'Идентификатор'])\n        serializer = self.get_serializer(bonus_card)\n        return serializer.data\n\n\nclass BonusCardBalanceView(BonusCardBaseView):\n    serializer_class = BonusCardBalanceSerializer\n\n    def get_wsdl_service(self, wsdl_client):\n        return wsdl_client.service.BonusCardInfo(self.kwargs.get('uuid'))\n\n    def get_serialized_data(self, wsdl_data):\n        balance = wsdl_data[u'Баллы']\n        bonus_card = BonusCard(uuid=self.kwargs.get('uuid'), balance=balance)\n        serializer = self.get_serializer(bonus_card)\n        return serializer.data\n\n\nclass BonusCardTransactionsView(BonusCardBaseView):\n    serializer_class = BonusCardTransactionsSerializer\n\n    def get_wsdl_service(self, wsdl_client):\n        return wsdl_client.service.BonusCardTransactions(\n            self.kwargs.get('uuid')\n        )\n\n    def get_serialized_data(self, wsdl_data):\n        transactions = []\n        for transaction in wsdl_data[u'ИсторияОпераций']:\n            transactions.append({\n                'period': transaction[u'Период'],\n                'balance': transaction[u'Баллы'],\n                'comment': transaction[u'Комментарий'],\n            })\n\n        bonus_card = BonusCard(uuid=self.kwargs.get('uuid'),\n                               transactions=transactions)\n        serializer = self.get_serializer(bonus_card)\n        return serializer.data\n", "sub_path": "src/bonus_cards/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4472, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.DEBUG", 
"line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 46, "usage_type": "name"}, {"api_name": "suds.client.Client", "line_number": 55, "usage_type": "call"}, {"api_name": "django.conf.settings.ONE_C_WSDL", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_C_WSDL_USER", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 56, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_C_WSDL_PASSWORD", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "suds.client.WebFault", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 77, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 77, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 81, "usage_type": "name"}, {"api_name": "bonus_cards.serializers.BonusCardGetUuidSerializer", "line_number": 85, "usage_type": "name"}, {"api_name": "bonus_cards.models.BonusCard", "line_number": 94, "usage_type": "call"}, {"api_name": "bonus_cards.serializers.BonusCardBalanceSerializer", "line_number": 100, "usage_type": "name"}, {"api_name": "bonus_cards.models.BonusCard", "line_number": 107, "usage_type": "call"}, {"api_name": "bonus_cards.serializers.BonusCardTransactionsSerializer", "line_number": 113, "usage_type": "name"}, {"api_name": "bonus_cards.models.BonusCard", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "429048619", "text": "from flask import Flask, render_template, request, send_file, url_for\nfrom werkzeug.utils import secure_filename\nimport numpy\nimport calendar\nimport time\nfrom custom_util import *\n\napp=Flask(__name__)\n\n# get running path\nbase_dir = os.path.dirname(__file__)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/success\", methods=['POST'])\ndef success():\n if request.method=='POST':\n filestr=request.files[\"file\"]\n\n #convert string data to numpy array\n npimg = numpy.frombuffer(filestr.read(), numpy.uint8)\n\n # convert numpy array to image\n img = cv2.imdecode(npimg, cv2.COLOR_RGB2BGR)\n\n image_predicted = predict_Luna_Ju(img)\n\n file_to_save = str(calendar.timegm(time.gmtime()))\n\n cv2.imwrite(os.path.join(base_dir, 'static', file_to_save + '.jpg'), image_predicted)\n\n image_file = url_for('static', filename=file_to_save + 
'.jpg')\n\n\n return render_template(\"success.html\", img = image_file)\n\n\nif __name__ == '__main__':\n app.run(port=80)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.frombuffer", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 23, "usage_type": "attribute"}, {"api_name": "calendar.timegm", "line_number": 30, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "92752113", "text": "import fcntl\nimport sys\nimport os\nimport time\nimport tty\nimport termios\nimport random\nimport subprocess\nimport numpy as np\nimport cv2\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\n#from matplotlib import pyplot as plt\n\nclass RawStream(object):\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n def __enter__(self):\n self.original_stty = termios.tcgetattr(self.stream)\n tty.setcbreak(self.stream)\n def __exit__(self, type, value, traceback):\n termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)\n\nclass NonBlockingStream(object):\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n def __enter__(self):\n self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)\n fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)\n def __exit__(self, *args):\n fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)\n\n\nclass GCodeInstruction:\n def __init__(self, cmd, params):\n self.cmd_num = int(cmd[1:])\n self.cmd = cmd[0]\n self.params = params\n\n def toString(self):\n param_str = \"\"\n for key,value in self.params.items():\n param_str+=key+(\"{:.2f}\".format(value))+\" \"\n return self.cmd+str(self.cmd_num)+\" \"+param_str[:-1]\n\n @classmethod\n def parse(cls, str):\n words = str.strip().split(' ')\n cmd = words[0]\n if len(cmd)<2:\n return None\n params = {}\n for f in words[1:]:\n key = f[0]\n value = float(f[1:])\n params[key] = value\n return cls(cmd, params)\n\nclass Gcode:\n def __init__(self, instructions):\n self.instructions = instructions\n\n def bounds(self):\n max_x = 0\n max_y = 0\n min_y = sys.maxsize\n min_x = sys.maxsize\n for i in self.instructions:\n if 'X' in i.params:\n x = i.params['X']\n # if 'I' in i.params:\n # x+=i.params['I']\n if x>max_x:\n max_x = x \n if xmax_y:\n max_y = y \n if ySample:\n image_file = self.list_images[item]\n\n with open(image_file + \".cat\", \"r\") as f:\n content = f.read()\n\n pos = [int(e) for e in content.split(\" \") if e != '']\n\n pos = array(pos)[1:]\n\n x = pos[::2]\n\n y = pos[1::2]\n\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n return Sample(\n x=cv2.imread(image_file),\n y=array([xmin, xmax, ymin, ymax])\n )\n\n\n\n", "sub_path": 
"examples/cat_detection/data/cat_loader.py", "file_name": "cat_loader.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nautilus.dataset.dataset.Dataset", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 25, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 25, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ma.array", "line_number": 44, "usage_type": "call"}, {"api_name": "nautilus.data.sample.sample.Sample", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.ma.array", "line_number": 55, "usage_type": "call"}, {"api_name": "nautilus.data.sample.sample.Sample", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "496769857", "text": "import math\nfrom neuron import neuron\nclass layer:\n def __init__(self, size, numIn):\n w_initialization = [0 - math.sqrt(1 / numIn), math.sqrt(1 / numIn)]\n neurons = []\n for i in range(size):\n neurons.append(neuron(0, numIn, w_initialization))\n\n self.neurons = neurons\n self.size = size\n self. numIn = numIn\n \n def activate(self, inputValues):\n if len(inputValues) != self.numIn:\n print(\"Error: Number of inputs does not match layer input parametres\")\n return None\n\n for neuronIndex in range(len(self.neurons)):\n self.neurons[neuronIndex].activate(inputValues)", "sub_path": "layer.py", "file_name": "layer.py", "file_ext": "py", "file_size_in_byte": 668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "math.sqrt", "line_number": 5, "usage_type": "call"}, {"api_name": "neuron.neuron", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "23136834", "text": "import torch\nimport torch.onnx\nfrom torch.utils import data\nimport torchvision\nimport torchvision.models as models\nimport numpy as np\nimport cv2\nimport pytesseract\nfrom dataset import custom_dataset\nfrom imutils.object_detection import non_max_suppression\nimport sys\nimport os\nimport copy\nfrom model import EAST\nfrom matplotlib import pyplot as plt\n\ndef predictions(prob_score, geo, min_confidence):\n\t(numR, numC) = prob_score.shape[2:4]\n\tboxes = []\n\tconfidence_val = []\n\n\t# loop over rows\n\tfor y in range(0, numR):\n\t\tscoresData = prob_score[0, 0, y]\n\t\tx0 = geo[0, 0, y]\n\t\tx1 = geo[0, 1, y]\n\t\tx2 = geo[0, 2, y]\n\t\tx3 = geo[0, 3, y]\n\t\tanglesData = geo[0, 4, y]\n\n\t\t# loop over the number of columns\n\t\tfor i in range(0, numC):\n\t\t\tif scoresData[i] < min_confidence:\n\t\t\t\t#print (scoresData[i])\n\t\t\t\t#print ('Low Confidence!')\n\t\t\t\tcontinue\n\n\t\t\t(offX, offY) = (i * 4.0, y * 4.0)\n\n\t\t\t# extracting the rotation angle for the prediction and computing the sine and cosine\n\t\t\tangle = anglesData[i]\n\t\t\tcos = np.cos(angle)\n\t\t\tsin = np.sin(angle)\n\n\t\t\t# using the geo volume to get the dimensions of the bounding box\n\t\t\th = x0[i] + x2[i]\n\t\t\tw = x1[i] + x3[i]\n\n\t\t\t# compute start and end for the text pred bbox\n\t\t\tendX = int(offX + (cos * x1[i]) + (sin * x2[i]))\n\t\t\tendY = int(offY - (sin * x1[i]) 
+ (cos * x2[i]))\n\t\t\tstartX = int(endX - w)\n\t\t\tstartY = int(endY - h)\n\n\t\t\tboxes.append((startX, startY, endX, endY))\n\t\t\tconfidence_val.append(scoresData[i])\n\n\t# return bounding boxes and associated confidence_val\n\treturn (boxes, confidence_val)\n\ndef connect_horizontal_boxes(boxes, x_threshold=30, y_threshold=30):\n\tboxes_copy = boxes.copy()\n\tbox_it = sorted(boxes_copy, key=lambda tup: tup[0])\n\n\tdone = False\n\twhile (done == False):\n\t\tmerger = (1e6, 1e6)\n\t\tbox_to_merge = (0, 0, 0, 0)\n\t\tfound = False\n\t\ti = 0\n\t\tfor box in box_it:\n\t\t\t(start_X, start_Y, end_X, end_Y) = box\n\t\t\tj = 0\n\t\t\tfor new_box in box_it:\n\t\t\t\tif (i < j):\n\t\t\t\t\t(start_Xn, start_Yn, end_Xn, end_Yn) = new_box\n\t\t\t\t\tstartYdiff = np.abs(start_Yn - start_Y)\n\t\t\t\t\tendYdiff = np.abs(end_Yn - end_Y)\n\t\t\t\t\tYdiff = startYdiff + endYdiff\n\t\t\t\t\tif (Ydiff < y_threshold):\n\t\t\t\t\t\tXdiff = np.abs(start_Xn - end_X) \n\t\t\t\t\t\tif ((start_Xn <= end_X) or (Xdiff < x_threshold)):\n\t\t\t\t\t\t\tmerger = (i, j)\n\t\t\t\t\t\t\tsY = np.minimum(start_Y, start_Yn)\n\t\t\t\t\t\t\teY = np.maximum(end_Y, end_Yn)\n\t\t\t\t\t\t\tfound = True\n\n\t\t\t\t\t\t\tif (start_Xn <= end_X):\n\t\t\t\t\t\t\t\teX = np.maximum(end_X, end_Xn)\n\t\t\t\t\t\t\t\tbox_to_merge = (start_X, sY, eX, eY)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbox_to_merge = (start_X, sY, end_Xn, eY)\n\t\t\t\t\t\t\tbreak\n\t\t\t\tj += 1\n\t\t\tif (found == True):\n\t\t\t\tbreak\n\t\t\ti += 1\n\n\t\t#delete merger, and add new box, assume i before j\n\t\tif (found == True):\n\t\t\tbox_change = copy.deepcopy(box_it)\n\t\t\tbox_change.pop(merger[0])\n\t\t\tbox_change.pop(merger[1]-1)\n\t\t\tbox_change.append(box_to_merge)\n\t\t\tbox_change = sorted(box_change, key=lambda tup: tup[0])\n\t\t\tbox_it = copy.deepcopy(box_change)\n\t\telse:\n\t\t\tdone = True\n\n\treturn box_it\n\ndef process_image(image_read, image_real, east, min_confidence, width, height, hyst_X=0, hyst_Y=0, offset_X=0, offset_Y=0, remove_boxes=False):\n\n\t#unnecessary default\n\targs = {\"image\":\"/Users/surajmenon/Desktop/findocDocs/apple_test1.png\", \"east\":\"/Users/surajmenon/Desktop/findocDocs/frozen_east_text_detection.pb\", \"min_confidence\":0.5, \"width\":320, \"height\":320}\n\n\targs['image'] = image_real\n\targs['east'] = east\n\targs['min_confidence'] = min_confidence\n\targs['width'] = width\n\targs['height'] = height\n\n\tif (image_read == True):\n\t\timage = cv2.imread(args['image'])\n\telse:\n\t\timage = args['image']\n\n\t#print ('Processing Image')\n\t#print (image.shape)\n\tprint ('.')\n\n\n\t#Saving an original image and shape\n\torig = image.copy()\n\t(origH, origW) = image.shape[:2]\n\n\t# print ('Image Size')\n\t# print (origH)\n\t# print (origW)\n\t# exit()\n\n\t# set the new height and width to default 320 by using args #dictionary. \n\t(newW, newH) = (args[\"width\"], args[\"height\"])\n\n\t#Calculate the ratio between original and new image for both height and width. \n\t#This ratio will be used to translate bounding box location on the original image. 
\n\trW = origW / float(newW)\n\trH = origH / float(newH)\n\n\t# resize the original image to new dimensions\n\timage = cv2.resize(image, (newW, newH))\n\t(H, W) = image.shape[:2]\n\n\tnet = args[\"east\"]\n\n\tblob = cv2.dnn.blobFromImage(image, 1.0, (W, H),\n\t(123.68, 116.78, 103.94), swapRB=True, crop=False)\n\n\t# construct a blob from the image to forward pass it to EAST model\n\t# blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),\n\t# \t(123.68, 116.78, 103.94), swapRB=True, crop=False)\n\n\t# net = cv2.dnn.readNet(args[\"east\"])\n\n\t# We would like to get two outputs from the EAST model. \n\t#1. Probabilty scores for the region whether that contains text or not. \n\t#2. Geometry of the text -- Coordinates of the bounding box detecting a text\n\t# The following two layer need to pulled from EAST model for achieving this. \n\t# layerNames = [\n\t# \t\"feature_fusion/Conv_7/Sigmoid\",\n\t# \t\"feature_fusion/concat_3\"]\n\n\t# net.setInput(blob)\n\t#(scores, geometry) = net.forward(layerNames)\n\n\tprint (blob.shape)\n\t#image_r = image.reshape(1, 3, H, W)\n\tprint (blob.dtype)\n\tprint (blob.shape)\n\timage_r_pt = torch.from_numpy(blob)\n\tprint (image_r_pt.shape)\n\tprint (image_r_pt.dtype)\n\timage_r_pt = image_r_pt.type(torch.FloatTensor)\n\t(scores, geometry) = net(image_r_pt)\n\tprint (scores.shape)\n\tprint (geometry.shape)\n\n\tscores_n = scores.detach().cpu().numpy()\n\tgeometry_n = geometry.detach().cpu().numpy()\n\n\t(boxes, confidence_val) = predictions(scores_n, geometry_n, args['min_confidence'])\n\tboxes = non_max_suppression(np.array(boxes), probs=confidence_val)\n\n\t##Text Detection and Recognition \n\n\t# initialize the list of results\n\tresults = []\n\t\n\t#for now, say we don't want any X-shifting\n\tx_start_buffer = 0\n\n\t#boxes = connect_horizontal_boxes(boxes, x_threshold=50, y_threshold=20) \n\tadjusted_boxes = []\n\n\t# loop over the bounding boxes to find the coordinate of bounding boxes\n\tfor (startX, startY, endX, endY) in boxes:\n\t\t# scale the coordinates based on the respective ratios in order to reflect bounding box on the original image\n\t\tstartX = int(startX * rW) - hyst_X - x_start_buffer\n\t\tstartY = int(startY * rH) - hyst_Y \n\t\tendX = int(endX * rW) + hyst_X - x_start_buffer\n\t\tendY = int(endY * rH) + hyst_Y \n\n\t\t#bound the bound\n\t\tif (startX < 0):\n\t\t\tstartX = 0\n\t \n\t\tif (startY < 0):\n\t\t\tstartY = 0\n\n\t\tif (endX > origW):\n\t\t\tendX = origW-1\n\t\tif (endY > origH):\n\t\t\tendY = origH-1\n\n\t\tadjusted_box = (startX, startY, endX, endY)\n\t\tadjusted_boxes.append(adjusted_box)\n\n\t#adjusted_boxes = connect_horizontal_boxes(adjusted_boxes, x_threshold=5, y_threshold=15) \n\n\tfor (startX, startY, endX, endY) in adjusted_boxes:\n\t\t#extract the region of interest\n\t\tr = orig[startY:endY, startX:endX]\n\n\t\t#configuration setting to convert image to string. 
\n\t\t#configuration = (\"-l eng --oem 1 --psm 8\")\n\t\tconfiguration = (\"-l eng --oem 1 --psm 7\")\n\t ##This will recognize the text from the image of bounding box\n\n\n\t\ttry:\n\t\t\ttext = pytesseract.image_to_string(r, config=configuration)\n\t\texcept:\n\t\t\tprint ('Some bounding box out of order')\n\t\t\ttext = 'GHAJEFKJEKAFJEKFAJEFKEJKFAEK'\n\n\t\t# append bbox coordinate and associated text to the list of results \n\t\tresults.append(((startX, startY, endX, endY), text))\n\n\treturn orig, results\n\ndef show_image(image, results):\n\n\t#Display the image with bounding box and recognized text\n\t#orig_image = orig.copy()\n\torig_image = image.copy()\n\n\t# Moving over the results and display on the image\n\tfor ((start_X, start_Y, end_X, end_Y), text) in results:\n\t\t# display the text detected by Tesseract\n\t\tprint(\"{}\\n\".format(text))\n\n\t\t# Displaying text\n\t\ttext = \"\".join([x if ord(x) < 128 else \"\" for x in text]).strip()\n\t\tcv2.rectangle(orig_image, (start_X, start_Y), (end_X, end_Y),\n\t\t\t(0, 0, 255), 2)\n\t\tcv2.putText(orig_image, text, (start_X, start_Y - 30),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7,(0,0, 255), 2)\n\n\tplt.imshow(orig_image)\n\tplt.title('Output')\n\tplt.show()\n\nmodel_name = './pths/east_vgg16.pth'\n#model_name = './pths/sm2-300.pth'\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel = EAST(False).to(device)\nmodel.load_state_dict(torch.load(model_name, map_location=torch.device('cpu')))\n\n# set the model to inference mode\nmodel.eval()\n\n#img_path = \"/Users/surajmenon/Desktop/findocDocs/apple_tc_full1.png\"\nimg_path = \"/Users/surajmenon/Desktop/findocDocs/test_image1.jpg\"\nmin_confidence = .99\nheight = 512\nwidth = 512\n\nprocess_date_x = 15\nprocess_date_y = 5\n\nr_image, results = process_image(True, img_path, model, min_confidence, height, width, hyst_X=process_date_x, hyst_Y=process_date_y)\nshow_image(r_image, results)\n\n", "sub_path": "EAST-master-torch/model_test.py", "file_name": "model_test.py", "file_ext": "py", "file_size_in_byte": 8565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.cos", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 89, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 101, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 183, "usage_type": "attribute"}, {"api_name": "imutils.object_detection.non_max_suppression", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": 
"pytesseract.image_to_string", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 264, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 266, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 267, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 275, "usage_type": "attribute"}, {"api_name": "model.EAST", "line_number": 276, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 277, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 280, "usage_type": "call"}]} +{"seq_id": "131989729", "text": "# -*- coding: utf-8 -*-\nimport MySQLdb\nimport MySQLdb.cursors\nfrom twisted.enterprise import adbapi\nfrom scrapy.utils.project import get_project_settings\n# import shortuuid\n# import uuid\n\n\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n'''\n\n'''\nSETTINGS = get_project_settings()\n\nclass BrotherwatchingPipeline(object):\n\n def __init__(self):\n self.dbpool = adbapi.ConnectionPool ('MySQLdb',\n host=SETTINGS['DB_HOST'],\n user=SETTINGS['DB_USER'],\n passwd=SETTINGS['DB_PASSWD'],\n port=SETTINGS['DB_PORT'],\n db=SETTINGS['DB_DB'],\n charset='utf8',\n use_unicode = True,\n cursorclass=MySQLdb.cursors.DictCursor\n )\n\n def __del__(self):\n self.dbpool.close()\n\n def process_item(self,item,spider):\n sql='INSERT IGNORE INTO app_review (%s) VALUES (%s)'\n keys = item.keys()\n rows=', '.join(keys)\n values = ','.join(['\\'%s\\'' % item[k] for k in keys])\n self.dbpool.runOperation(sql % (rows,values))\n return item\n", "sub_path": "BrotherWatching/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scrapy.utils.project.get_project_settings", "line_number": 18, "usage_type": "call"}, {"api_name": "twisted.enterprise.adbapi.ConnectionPool", "line_number": 23, "usage_type": "call"}, {"api_name": "twisted.enterprise.adbapi", "line_number": 23, "usage_type": "name"}, {"api_name": "MySQLdb.cursors", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "445210687", "text": "# -*- coding: utf-8 -*-\n\"\"\"Simple models for super resolution such as linear interp models.\"\"\"\nimport numpy as np\nimport logging\nfrom inspect import signature\nimport os\nimport json\nfrom sup3r.utilities.utilities import st_interp\nfrom sup3r.models.abstract import AbstractInterface\n\nlogger = logging.getLogger(__name__)\n\n\nclass LinearInterp(AbstractInterface):\n \"\"\"Simple model to do linear interpolation on the spatial and 
temporal axes\n \"\"\"\n\n def __init__(self, features, s_enhance, t_enhance, t_centered=False):\n \"\"\"\n Parameters\n ----------\n features : list\n List of feature names that this model will operate on for both\n input and output. This must match the feature axis ordering in the\n array input to generate().\n s_enhance : int\n Integer factor by which the spatial axes is to be enhanced.\n t_enhance : int\n Integer factor by which the temporal axes is to be enhanced.\n t_centered : bool\n Flag to switch time axis from time-beginning (Default, e.g.\n interpolate 00:00 01:00 to 00:00 00:30 01:00 01:30) to\n time-centered (e.g. interp 01:00 02:00 to 00:45 01:15 01:45 02:15)\n \"\"\"\n\n self._features = features\n self._s_enhance = s_enhance\n self._t_enhance = t_enhance\n self._t_centered = t_centered\n\n @classmethod\n def load(cls, model_dir, verbose=False):\n \"\"\"Load the LinearInterp model with its params saved to the model_dir\n created with LinearInterp.save(model_dir)\n\n Parameters\n ----------\n model_dir : str\n Directory to load LinearInterp model files from. Must\n have a model_params.json file containing \"meta\" key with all of the\n class init args.\n verbose : bool\n Flag to log information about the loaded model.\n\n Returns\n -------\n out : LinearInterp\n Returns an initialized LinearInterp model\n \"\"\"\n fp_params = os.path.join(model_dir, 'model_params.json')\n assert os.path.exists(fp_params), f'Could not find: {fp_params}'\n with open(fp_params, 'r') as f:\n params = json.load(f)\n\n meta = params['meta']\n args = signature(cls.__init__).parameters\n kwargs = {k: v for k, v in meta.items() if k in args}\n model = cls(**kwargs)\n\n if verbose:\n logger.info('Loading LinearInterp with meta data: {}'\n .format(model.meta))\n\n return model\n\n @property\n def meta(self):\n \"\"\"Get meta data dictionary that defines the model params\"\"\"\n return {'features': self._features,\n 's_enhance': self._s_enhance,\n 't_enhance': self._t_enhance,\n 't_centered': self._t_centered,\n 'training_features': self.training_features,\n 'output_features': self.output_features,\n 'class': self.__class__.__name__,\n }\n\n @property\n def training_features(self):\n \"\"\"Get the list of input feature names that the generative model was\n trained on.\n \"\"\"\n return self._features\n\n @property\n def output_features(self):\n \"\"\"Get the list of output feature names that the generative model\n outputs\"\"\"\n return self._features\n\n def save(self, out_dir):\n \"\"\"\n Parameters\n ----------\n out_dir : str\n Directory to save linear model params. This directory will be\n created if it does not already exist.\n \"\"\"\n self.save_params(out_dir)\n\n # pylint: disable=unused-argument\n def generate(self, low_res, norm_in=False, un_norm_out=False,\n exogenous_data=None):\n \"\"\"Use the generator model to generate high res data from low res\n input. 
This is the public generate function.\n\n        Parameters\n        ----------\n        low_res : np.ndarray\n            Low-resolution spatiotemporal input data, a 5D array of shape:\n            (n_obs, spatial_1, spatial_2, temporal, n_features)\n        norm_in : bool\n            This doesn't do anything for this LinearInterp, but is\n            kept to keep the same interface as Sup3rGan\n        un_norm_out : bool\n            This doesn't do anything for this LinearInterp, but is\n            kept to keep the same interface as Sup3rGan\n        exogenous_data : list\n            This doesn't do anything for this LinearInterp, but is\n            kept to keep the same interface as Sup3rGan\n\n        Returns\n        -------\n        hi_res : ndarray\n            high-resolution spatial output data, a 5D array of shape:\n            (n_obs, spatial_1, spatial_2, temporal, n_features)\n        \"\"\"\n\n        hr_shape = (len(low_res),\n                    int(low_res.shape[1] * self._s_enhance),\n                    int(low_res.shape[2] * self._s_enhance),\n                    int(low_res.shape[3] * self._t_enhance),\n                    len(self.output_features))\n        logger.debug('LinearInterp model with s_enhance of {} '\n                     'and t_enhance of {} '\n                     'downscaling low-res shape {} to high-res shape {}'\n                     .format(self._s_enhance, self._t_enhance,\n                             low_res.shape, hr_shape))\n\n        hi_res = np.zeros(hr_shape, dtype=np.float32)\n\n        for iobs in range(len(low_res)):\n            for idf in range(low_res.shape[-1]):\n                hi_res[iobs, ..., idf] = st_interp(low_res[iobs, ..., idf],\n                                                   self.s_enhance,\n                                                   self.t_enhance,\n                                                   t_centered=self._t_centered)\n\n        return hi_res\n", "sub_path": "sup3r/models/linear.py", "file_name": "linear.py", "file_ext": "py", "file_size_in_byte": 5817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "sup3r.models.abstract.AbstractInterface", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 63, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sup3r.utilities.utilities.st_interp", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "10262458", "text": "import plotly.express as px\nimport csv\nimport numpy as np\n\ndef plotfigure(data_path):\n    with open(data_path) as csv_file:\n        df=csv.DictReader(csv_file)\n        fig=px.scatter(df,x=\"Days Present\",y=\"Marks In Percentage\")\n        fig.show()\ndef getDataSource(data_path):\n    MarksInPercentage=[]\n    DaysPresent=[]\n    with open(data_path)as csv_file:\n        csv_reader=csv.DictReader(csv_file)\n        for row in csv_reader:\n            MarksInPercentage.append(float(row[\"Marks In Percentage\"]))\n            DaysPresent.append(float(row[\"Days Present\"]))\n\n    \n    return{\"x\":MarksInPercentage,\"y\":DaysPresent}\n\ndef findcorrelation(dataSource):\n    correlation=np.corrcoef(dataSource[\"x\"],dataSource[\"y\"])\n    print(\"Correlation between Marks and Days Present: \\n=\",correlation[0,1])\n\n\ndef setup():\n    data_path=\"data2.csv\"\n    dataSource=getDataSource(data_path)\n    findcorrelation(dataSource)\n    plotfigure(data_path)\nsetup()\n", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 944, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "csv.DictReader", "line_number": 7, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 8, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 8, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "141357636", "text": "#!/usr/bin/python3\nimport itertools\n\n__author__ = 'Pavel Yurgin'\n\nimport bz2\n\n\ndef read_header(wiki):\n header = []\n for line in wiki:\n header.append(line)\n if line.strip() == '':\n return header\n\n\ndef read_page(wiki, skip_redirect=True):\n page = []\n for line in wiki:\n if '#REDIRECT' in line and skip_redirect:\n for line in wiki:\n if line.strip() == '':\n page = []\n break\n else:\n page.append(line)\n if line.strip() == '':\n return page\n\n\ndef split_wiki(input, output, count=float('inf'), skip_redirect=True):\n with bz2.open(input, mode='rt') as input, open(output, 'w', buffering=1024 * 1024) as output:\n header = read_header(input)\n output.writelines(header)\n for i in itertools.count():\n if i > count:\n break\n page = read_page(input, skip_redirect=skip_redirect)\n output.write('\\n'.join(page))\n if i % 1000 == 0 and i != 0:\n print('{} pages processed'.format(i))\n output.write('')\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(\n '''Simple script for getting part of compressed wikipedia dump with ''')\n parser.add_argument('--input', metavar='input', type=str,\n help='path to input compressed wikipedia xml', required=True)\n\n parser.add_argument('--output', metavar='output', type=str,\n help='path to output xml', required=True)\n\n parser.add_argument('--count', metavar='count', type=int, required=True,\n help='page count')\n parser.add_argument('--skip_redirected', metavar='skip_redirected', type=bool,\n help='skip page with redirect')\n\n args = parser.parse_args()\n args = vars(args)\n args = {key: args[key] for key in args if args[key] is not None}\n\n split_wiki(**args)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "dataset/split_wiki_dump.py", "file_name": "split_wiki_dump.py", "file_ext": "py", "file_size_in_byte": 2054, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "bz2.open", "line_number": 32, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 35, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "209043014", "text": "# Copyright (c) 2012-2013 LiuYC https://github.com/liuyichen/\n# Copyright 2012-2014 ksyun.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport base64\nimport datetime\nfrom hashlib import sha256\nfrom hashlib import sha1\nimport hmac\nimport logging\nfrom email.utils import formatdate\nfrom operator import itemgetter\nimport functools\nimport time\nimport calendar\n\nfrom kscore.exceptions import NoCredentialsError\nfrom kscore.utils import normalize_url_path, percent_encode_sequence\nfrom kscore.compat import HTTPHeaders\nfrom kscore.compat import quote, unquote, urlsplit, parse_qs\nfrom kscore.compat import urlunsplit\nfrom kscore.compat import json\nfrom collections import namedtuple\n\n\nimport sys\nimport logging\nimport select\nimport functools\nimport socket\nimport inspect\n\nfrom kscore.compat import six\nfrom kscore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit\nfrom kscore.exceptions import UnseekableStreamError\nfrom kscore.utils import percent_encode_sequence\nfrom kscore.vendored.requests import models\nfrom kscore.vendored.requests.sessions import REDIRECT_STATI\nfrom kscore.vendored.requests.packages.urllib3.connection import \\\n VerifiedHTTPSConnection\nfrom kscore.vendored.requests.packages.urllib3.connection import \\\n HTTPConnection\nfrom kscore.vendored.requests.packages.urllib3.connectionpool import \\\n HTTPConnectionPool\nfrom kscore.vendored.requests.packages.urllib3.connectionpool import \\\n HTTPSConnectionPool\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass KSHTTPResponse(HTTPResponse):\n # The *args, **kwargs is used because the args are slightly\n # different in py2.6 than in py2.7/py3.\n def __init__(self, *args, **kwargs):\n self._status_tuple = kwargs.pop('status_tuple')\n HTTPResponse.__init__(self, *args, **kwargs)\n\n def _read_status(self):\n if self._status_tuple is not None:\n status_tuple = self._status_tuple\n self._status_tuple = None\n return status_tuple\n else:\n return HTTPResponse._read_status(self)\n\n\nclass KSHTTPConnection(HTTPConnection):\n \"\"\"HTTPConnection that supports Expect 100-continue.\n\n This is conceptually a subclass of httplib.HTTPConnection (though\n technically we subclass from urllib3, which subclasses\n httplib.HTTPConnection) and we only override this class to support Expect\n 100-continue, which we need for S3. As far as I can tell, this is\n general purpose enough to not be specific to S3, but I'm being\n tentative and keeping it in kscore because I've only tested\n this against KSYUN services.\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n HTTPConnection.__init__(self, *args, **kwargs)\n self._original_response_cls = self.response_class\n # We'd ideally hook into httplib's states, but they're all\n # __mangled_vars so we use our own state var. This variable is set\n # when we receive an early response from the server. If this value is\n # set to True, any calls to send() are noops. This value is reset to\n # false every time _send_request is called. 
This is to workaround the\n # fact that py2.6 (and only py2.6) has a separate send() call for the\n # body in _send_request, as opposed to endheaders(), which is where the\n # body is sent in all versions > 2.6.\n self._response_received = False\n self._expect_header_set = False\n\n def close(self):\n HTTPConnection.close(self)\n # Reset all of our instance state we were tracking.\n self._response_received = False\n self._expect_header_set = False\n self.response_class = self._original_response_cls\n\n def _tunnel(self):\n # Works around a bug in py26 which is fixed in later versions of\n # python. Bug involves hitting an infinite loop if readline() returns\n # nothing as opposed to just ``\\r\\n``.\n # As much as I don't like having if py2: code blocks, this seems\n # the cleanest way to handle this workaround. Fortunately, the\n # difference from py26 to py3 is very minimal. We're essentially\n # just overriding the while loop.\n if sys.version_info[:2] != (2, 6):\n return HTTPConnection._tunnel(self)\n\n # Otherwise we workaround the issue.\n self._set_hostport(self._tunnel_host, self._tunnel_port)\n self.send(\"CONNECT %s:%d HTTP/1.0\\r\\n\" % (self.host, self.port))\n for header, value in self._tunnel_headers.iteritems():\n self.send(\"%s: %s\\r\\n\" % (header, value))\n self.send(\"\\r\\n\")\n response = self.response_class(self.sock, strict=self.strict,\n method=self._method)\n (version, code, message) = response._read_status()\n\n if code != 200:\n self.close()\n raise socket.error(\"Tunnel connection failed: %d %s\" %\n (code, message.strip()))\n while True:\n line = response.fp.readline()\n if not line:\n break\n if line in (b'\\r\\n', b'\\n', b''):\n break\n\n def _send_request(self, method, url, body, headers, *py36_up_extra):\n self._response_received = False\n if headers.get('Expect', b'') == b'100-continue':\n self._expect_header_set = True\n else:\n self._expect_header_set = False\n self.response_class = self._original_response_cls\n rval = HTTPConnection._send_request(\n self, method, url, body, headers, *py36_up_extra)\n self._expect_header_set = False\n return rval\n\n def _convert_to_bytes(self, mixed_buffer):\n # Take a list of mixed str/bytes and convert it\n # all into a single bytestring.\n # Any six.text_types will be encoded as utf-8.\n bytes_buffer = []\n for chunk in mixed_buffer:\n if isinstance(chunk, six.text_type):\n bytes_buffer.append(chunk.encode('utf-8'))\n else:\n bytes_buffer.append(chunk)\n msg = b\"\\r\\n\".join(bytes_buffer)\n return msg\n\n def _send_output(self, message_body=None, **py36_up_extra):\n self._buffer.extend((b\"\", b\"\"))\n msg = self._convert_to_bytes(self._buffer)\n del self._buffer[:]\n # If msg and message_body are sent in a single send() call,\n # it will avoid performance problems caused by the interaction\n # between delayed ack and the Nagle algorithm.\n if isinstance(message_body, bytes):\n msg += message_body\n message_body = None\n self.send(msg)\n if self._expect_header_set:\n # This is our custom behavior. 
If the Expect header was\n            # set, it will trigger this custom behavior.\n            logger.debug(\"Waiting for 100 Continue response.\")\n            # Wait for 1 second for the server to send a response.\n            read, write, exc = select.select([self.sock], [], [self.sock], 1)\n            if read:\n                self._handle_expect_response(message_body)\n                return\n            else:\n                # From the RFC:\n                # Because of the presence of older implementations, the\n                # protocol allows ambiguous situations in which a client may\n                # send \"Expect: 100-continue\" without receiving either a 417\n                # (Expectation Failed) status or a 100 (Continue) status.\n                # Therefore, when a client sends this header field to an origin\n                # server (possibly via a proxy) from which it has never seen a\n                # 100 (Continue) status, the client SHOULD NOT wait for an\n                # indefinite period before sending the request body.\n                logger.debug(\"No response seen from server, continuing to \"\n                             \"send the response body.\")\n        if message_body is not None:\n            # message_body was not a string (i.e. it is a file), and\n            # we must run the risk of Nagle.\n            self.send(message_body)\n\n    def _consume_headers(self, fp):\n        # Most servers (including S3) will just return\n        # the CRLF after the 100 continue response. However,\n        # some servers (I've specifically seen this for squid when\n        # used as a straight HTTP proxy) will also inject a\n        # Connection: keep-alive header. To account for this\n        # we'll read until we read '\\r\\n', and ignore any headers\n        # that come immediately after the 100 continue response.\n        current = None\n        while current != b'\\r\\n':\n            current = fp.readline()\n\n    def _handle_expect_response(self, message_body):\n        # This is called when we sent the request headers containing\n        # an Expect: 100-continue header and received a response.\n        # We now need to figure out what to do.\n        fp = self.sock.makefile('rb', 0)\n        try:\n            maybe_status_line = fp.readline()\n            parts = maybe_status_line.split(None, 2)\n            if self._is_100_continue_status(maybe_status_line):\n                self._consume_headers(fp)\n                logger.debug(\"100 Continue response seen, \"\n                             \"now sending request body.\")\n                self._send_message_body(message_body)\n            elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):\n                # From the RFC:\n                # Requirements for HTTP/1.1 origin servers:\n                #\n                # - Upon receiving a request which includes an Expect\n                #   request-header field with the \"100-continue\"\n                #   expectation, an origin server MUST either respond with\n                #   100 (Continue) status and continue to read from the\n                #   input stream, or respond with a final status code.\n                #\n                # So if we don't get a 100 Continue response, then\n                # whatever the server has sent back is the final response\n                # and don't send the message_body.\n                logger.debug(\"Received a non 100 Continue response \"\n                             \"from the server, NOT sending request body.\")\n                status_tuple = (parts[0].decode('ascii'),\n                                int(parts[1]), parts[2].decode('ascii'))\n                response_class = functools.partial(\n                    KSHTTPResponse, status_tuple=status_tuple)\n                self.response_class = response_class\n                self._response_received = True\n        finally:\n            fp.close()\n\n    def _send_message_body(self, message_body):\n        if message_body is not None:\n            self.send(message_body)\n\n    def send(self, str):\n        if self._response_received:\n            logger.debug(\"send() called, but response already received. 
\"\n \"Not sending data.\")\n return\n return HTTPConnection.send(self, str)\n\n def _is_100_continue_status(self, maybe_status_line):\n parts = maybe_status_line.split(None, 2)\n # Check for HTTP/ 100 Continue\\r\\n\n return (\n len(parts) >= 3 and parts[0].startswith(b'HTTP/') and\n parts[1] == b'100')\n\n\nclass KSHTTPSConnection(VerifiedHTTPSConnection):\n pass\n\n\n# Now we need to set the methods we overrode from KSHTTPConnection\n# onto KSHTTPSConnection. This is just a shortcut to avoid\n# copy/pasting the same code into KSHTTPSConnection.\nfor name, function in KSHTTPConnection.__dict__.items():\n if inspect.isfunction(function):\n setattr(KSHTTPSConnection, name, function)\n\n\ndef prepare_request_dict(request_dict, endpoint_url, user_agent=None):\n \"\"\"\n This method prepares a request dict to be created into an\n KSRequestObject. This prepares the request dict by adding the\n url and the user agent to the request dict.\n\n :type request_dict: dict\n :param request_dict: The request dict (created from the\n ``serialize`` module).\n\n :type user_agent: string\n :param user_agent: The user agent to use for this request.\n\n :type endpoint_url: string\n :param endpoint_url: The full endpoint url, which contains at least\n the scheme, the hostname, and optionally any path components.\n \"\"\"\n r = request_dict\n if user_agent is not None:\n headers = r['headers']\n headers['User-Agent'] = user_agent\n url = _urljoin(endpoint_url, r['url_path'])\n if r['query_string']:\n encoded_query_string = percent_encode_sequence(r['query_string'])\n if '?' not in url:\n url += '?%s' % encoded_query_string\n else:\n url += '&%s' % encoded_query_string\n r['url'] = url\n\n\ndef create_request_object(request_dict):\n \"\"\"\n This method takes a request dict and creates an KSRequest object\n from it.\n\n :type request_dict: dict\n :param request_dict: The request dict (created from the\n ``prepare_request_dict`` method).\n\n :rtype: ``kscore.ksrequest.KSRequest``\n :return: An KSRequest object based on the request_dict.\n\n \"\"\"\n r = request_dict\n return KSRequest(method=r['method'], url=r['url'],\n data=r['body'],\n headers=r['headers'])\n\n\ndef _urljoin(endpoint_url, url_path):\n p = urlsplit(endpoint_url)\n # - \n # scheme - p[0]\n # netloc - p[1]\n # path - p[2]\n # query - p[3]\n # fragment - p[4]\n if not url_path or url_path == '/':\n # If there's no path component, ensure the URL ends with\n # a '/' for backwards compatibility.\n if not p[2]:\n return endpoint_url + '/'\n return endpoint_url\n if p[2].endswith('/') and url_path.startswith('/'):\n new_path = p[2][:-1] + url_path\n else:\n new_path = p[2] + url_path\n reconstructed = urlunsplit((p[0], p[1], new_path, p[3], p[4]))\n return reconstructed\n\n\nclass KSRequest(models.RequestEncodingMixin, models.Request):\n def __init__(self, *args, **kwargs):\n self.auth_path = None\n if 'auth_path' in kwargs:\n self.auth_path = kwargs['auth_path']\n del kwargs['auth_path']\n models.Request.__init__(self, *args, **kwargs)\n headers = HTTPHeaders()\n if self.headers is not None:\n for key, value in self.headers.items():\n headers[key] = value\n self.headers = headers\n # This is a dictionary to hold information that is used when\n # processing the request. What is inside of ``context`` is open-ended.\n # For example, it may have a timestamp key that is used for holding\n # what the timestamp is when signing the request. 
Note that none\n # of the information that is inside of ``context`` is directly\n # sent over the wire; the information is only used to assist in\n # creating what is sent over the wire.\n self.context = {}\n\n def prepare(self):\n \"\"\"Constructs a :class:`KSPreparedRequest `.\"\"\"\n # Eventually I think it would be nice to add hooks into this process.\n p = KSPreparedRequest(self)\n p.prepare_method(self.method)\n p.prepare_url(self.url, self.params)\n p.prepare_headers(self.headers)\n p.prepare_cookies(self.cookies)\n p.prepare_body(self.data, self.files)\n p.prepare_auth(self.auth)\n return p\n\n @property\n def body(self):\n p = models.PreparedRequest()\n p.prepare_headers({})\n p.prepare_body(self.data, self.files)\n if isinstance(p.body, six.text_type):\n p.body = p.body.encode('utf-8')\n return p.body\n\n\nclass KSPreparedRequest(models.PreparedRequest):\n \"\"\"Represents a prepared request.\n\n :ivar method: HTTP Method\n :ivar url: The full url\n :ivar headers: The HTTP headers to send.\n :ivar body: The HTTP body.\n :ivar hooks: The set of callback hooks.\n\n In addition to the above attributes, the following attributes are\n available:\n\n :ivar query_params: The original query parameters.\n :ivar post_param: The original POST params (dict).\n\n \"\"\"\n def __init__(self, original_request):\n self.original = original_request\n super(KSPreparedRequest, self).__init__()\n self.hooks.setdefault('response', []).append(\n self.reset_stream_on_redirect)\n\n def reset_stream_on_redirect(self, response, **kwargs):\n if response.status_code in REDIRECT_STATI and \\\n self._looks_like_file(self.body):\n logger.debug(\"Redirect received, rewinding stream: %s\", self.body)\n self.reset_stream()\n\n def _looks_like_file(self, body):\n return hasattr(body, 'read') and hasattr(body, 'seek')\n\n def reset_stream(self):\n # Trying to reset a stream when there is a no stream will\n # just immediately return. It's not an error, it will produce\n # the same result as if we had actually reset the stream (we'll send\n # the entire body contents again if we need to).\n # Same case if the body is a string/bytes type.\n if self.body is None or isinstance(self.body, six.text_type) or \\\n isinstance(self.body, six.binary_type):\n return\n try:\n logger.debug(\"Rewinding stream: %s\", self.body)\n self.body.seek(0)\n except Exception as e:\n logger.debug(\"Unable to rewind stream: %s\", e)\n raise UnseekableStreamError(stream_object=self.body)\n\n def prepare_body(self, data, files, json=None):\n \"\"\"Prepares the given HTTP body data.\"\"\"\n super(KSPreparedRequest, self).prepare_body(data, files, json)\n\n # Calculate the Content-Length by trying to seek the file as\n # requests cannot determine content length for some seekable file-like\n # objects.\n if 'Content-Length' not in self.headers:\n if hasattr(data, 'seek') and hasattr(data, 'tell'):\n orig_pos = data.tell()\n data.seek(0, 2)\n end_file_pos = data.tell()\n self.headers['Content-Length'] = str(end_file_pos - orig_pos)\n data.seek(orig_pos)\n # If the Content-Length was added this way, a\n # Transfer-Encoding was added by requests because it did\n # not add a Content-Length header. 
However, the\n                # Transfer-Encoding header is not supported for\n                # KSYUN Services so remove it if it is added.\n                if 'Transfer-Encoding' in self.headers:\n                    self.headers.pop('Transfer-Encoding')\n\nHTTPSConnectionPool.ConnectionCls = KSHTTPSConnection\nHTTPConnectionPool.ConnectionCls = KSHTTPConnection\n\n\nlogger = logging.getLogger(__name__)\n\n\nEMPTY_SHA256_HASH = (\n    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')\n# This is the buffer size used when calculating sha256 checksums.\n# Experimenting with various buffer sizes showed that this value generally\n# gave the best result (in terms of performance).\nPAYLOAD_BUFFER = 1024 * 1024\nISO8601 = '%Y-%m-%dT%H:%M:%SZ'\nSIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'\nSIGNED_HEADERS_BLACKLIST = [\n    'expect',\n    'user-agent'\n]\n\n\nclass BaseSigner(object):\n    REQUIRES_REGION = False\n\n    def add_auth(self, request):\n        raise NotImplementedError(\"add_auth\")\n\n\nReadOnlyCredentials = namedtuple('ReadOnlyCredentials',\n                                 ['access_key', 'secret_key', 'token'])\n\n\nclass Credentials(object):\n    \"\"\"\n    Holds the credentials needed to authenticate requests.\n\n    :ivar access_key: The access key part of the credentials.\n    :ivar secret_key: The secret key part of the credentials.\n    :ivar token: The security token, valid only for session credentials.\n    :ivar method: A string which identifies where the credentials\n        were found.\n    \"\"\"\n\n    def __init__(self, access_key, secret_key, token=None,\n                 method=None):\n        self.access_key = access_key\n        self.secret_key = secret_key\n        self.token = token\n\n        if method is None:\n            method = 'explicit'\n        self.method = method\n\n        self._normalize()\n\n    def _normalize(self):\n        # Keys would sometimes (accidentally) contain non-ascii characters.\n        # It would cause a confusing UnicodeDecodeError in Python 2.\n        # We explicitly convert them into unicode to avoid such error.\n        #\n        # Eventually the service will decide whether to accept the credential.\n        # This also complies with the behavior in Python 3.\n\n        self.access_key = self.access_key\n        self.secret_key = self.secret_key\n\n    def get_frozen_credentials(self):\n        return ReadOnlyCredentials(self.access_key,\n                                   self.secret_key,\n                                   self.token)\n\n\nclass SigV4Auth(BaseSigner):\n    \"\"\"\n    Sign a request with Signature V4.\n    \"\"\"\n    REQUIRES_REGION = True\n\n    def __init__(self, credentials, service_name, region_name):\n        self.credentials = credentials\n        # We initialize these values here so the unit tests can have\n        # valid values. But these will get overridden in ``add_auth``\n        # later for real requests.\n        self._region_name = region_name\n        self._service_name = service_name\n\n    def _sign(self, key, msg, hex=False):\n        if hex:\n            sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()\n        else:\n            sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()\n        return sig\n\n    def headers_to_sign(self, request):\n        \"\"\"\n        Select the headers from the request that need to be included\n        in the StringToSign.\n        \"\"\"\n        header_map = HTTPHeaders()\n        split = urlsplit(request.url)\n        for name, value in request.headers.items():\n            lname = name.lower()\n            if lname not in SIGNED_HEADERS_BLACKLIST:\n                header_map[lname] = value\n        if 'host' not in header_map:\n            header_map['host'] = split.netloc\n        return header_map\n\n    def canonical_query_string(self, request):\n        # The query string can come from two parts. One is the\n        # params attribute of the request. 
The other is from the request\n # url (in which case we have to re-split the url into its components\n # and parse out the query string component).\n if request.params:\n return self._canonical_query_string_params(request.params)\n else:\n return self._canonical_query_string_url(urlsplit(request.url))\n\n def _canonical_query_string_params(self, params):\n l = []\n for param in sorted(params):\n value = str(params[param])\n l.append('%s=%s' % (quote(param, safe='-_.~'),\n quote(value, safe='-_.~')))\n cqs = '&'.join(l)\n return cqs\n\n def _canonical_query_string_url(self, parts):\n canonical_query_string = ''\n if parts.query:\n # [(key, value), (key2, value2)]\n key_val_pairs = []\n for pair in parts.query.split('&'):\n key, _, value = pair.partition('=')\n key_val_pairs.append((key, value))\n sorted_key_vals = []\n # Sort by the key names, and in the case of\n # repeated keys, then sort by the value.\n for key, value in sorted(key_val_pairs):\n sorted_key_vals.append('%s=%s' % (key, value))\n canonical_query_string = '&'.join(sorted_key_vals)\n return canonical_query_string\n\n def canonical_headers(self, headers_to_sign):\n \"\"\"\n Return the headers that need to be included in the StringToSign\n in their canonical form by converting all header keys to lower\n case, sorting them in alphabetical order and then joining\n them into a string, separated by newlines.\n \"\"\"\n headers = []\n sorted_header_names = sorted(set(headers_to_sign))\n for key in sorted_header_names:\n value = ','.join(v.strip() for v in\n sorted(headers_to_sign.get_all(key)))\n headers.append('%s:%s' % (key, value))\n return '\\n'.join(headers)\n\n def signed_headers(self, headers_to_sign):\n l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]\n l = sorted(l)\n return ';'.join(l)\n\n def payload(self, request):\n if request.body and hasattr(request.body, 'seek'):\n position = request.body.tell()\n read_chunksize = functools.partial(request.body.read,\n PAYLOAD_BUFFER)\n checksum = sha256()\n for chunk in iter(read_chunksize, b''):\n checksum.update(chunk)\n hex_checksum = checksum.hexdigest()\n request.body.seek(position)\n return hex_checksum\n elif request.body:\n # The request serialization has ensured that\n # request.body is a bytes() type.\n return sha256(request.body).hexdigest()\n else:\n return EMPTY_SHA256_HASH\n\n def canonical_request(self, request):\n cr = [request.method.upper()]\n path = self._normalize_url_path(urlsplit(request.url).path)\n cr.append(path)\n cr.append(self.canonical_query_string(request))\n headers_to_sign = self.headers_to_sign(request)\n cr.append(self.canonical_headers(headers_to_sign) + '\\n')\n cr.append(self.signed_headers(headers_to_sign))\n if 'X-Amz-Content-SHA256' in request.headers:\n body_checksum = request.headers['X-Amz-Content-SHA256']\n else:\n body_checksum = self.payload(request)\n cr.append(body_checksum)\n return '\\n'.join(cr)\n\n def _normalize_url_path(self, path):\n normalized_path = quote(normalize_url_path(path), safe='/~')\n return normalized_path\n\n def scope(self, request):\n scope = [self.credentials.access_key]\n scope.append(request.context['timestamp'][0:8])\n scope.append(self._region_name)\n scope.append(self._service_name)\n scope.append('aws4_request')\n return '/'.join(scope)\n\n def credential_scope(self, request):\n scope = []\n scope.append(request.context['timestamp'][0:8])\n scope.append(self._region_name)\n scope.append(self._service_name)\n scope.append('aws4_request')\n return '/'.join(scope)\n\n def string_to_sign(self, 
request, canonical_request):\n \"\"\"\n Return the canonical StringToSign as well as a dict\n containing the original version of all headers that\n were included in the StringToSign.\n \"\"\"\n sts = ['AWS4-HMAC-SHA256']\n sts.append(request.context['timestamp'])\n sts.append(self.credential_scope(request))\n sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())\n return '\\n'.join(sts)\n\n def signature(self, string_to_sign, request):\n key = self.credentials.secret_key\n k_date = self._sign(('AWS4' + key).encode('utf-8'),\n request.context['timestamp'][0:8])\n k_region = self._sign(k_date, self._region_name)\n k_service = self._sign(k_region, self._service_name)\n k_signing = self._sign(k_service, 'aws4_request')\n return self._sign(k_signing, string_to_sign, hex=True)\n\n def add_auth(self, request):\n if self.credentials is None:\n raise NoCredentialsError\n datetime_now = datetime.datetime.utcnow()\n request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)\n # This could be a retry. Make sure the previous\n # authorization header is removed first.\n self._modify_request_before_signing(request)\n canonical_request = self.canonical_request(request)\n logger.debug(\"Calculating signature using v4 auth.\")\n logger.debug('CanonicalRequest:\\n%s', canonical_request)\n string_to_sign = self.string_to_sign(request, canonical_request)\n logger.debug('StringToSign:\\n%s', string_to_sign)\n signature = self.signature(string_to_sign, request)\n logger.debug('Signature:\\n%s', signature)\n\n self._inject_signature_to_request(request, signature)\n\n def _inject_signature_to_request(self, request, signature):\n l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]\n headers_to_sign = self.headers_to_sign(request)\n l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))\n l.append('Signature=%s' % signature)\n request.headers['Authorization'] = ', '.join(l)\n return request\n\n def _modify_request_before_signing(self, request):\n if 'Authorization' in request.headers:\n del request.headers['Authorization']\n self._set_necessary_date_headers(request)\n if self.credentials.token:\n if 'X-Amz-Security-Token' in request.headers:\n del request.headers['X-Amz-Security-Token']\n request.headers['X-Amz-Security-Token'] = self.credentials.token\n\n def _set_necessary_date_headers(self, request):\n # The spec allows for either the Date _or_ the X-Amz-Date value to be\n # used so we check both. If there's a Date header, we use the date\n # header. 
Otherwise we use the X-Amz-Date header.\n if 'Date' in request.headers:\n del request.headers['Date']\n datetime_timestamp = datetime.datetime.strptime(\n request.context['timestamp'], SIGV4_TIMESTAMP)\n request.headers['Date'] = formatdate(\n int(calendar.timegm(datetime_timestamp.timetuple())))\n if 'X-Amz-Date' in request.headers:\n del request.headers['X-Amz-Date']\n else:\n if 'X-Amz-Date' in request.headers:\n del request.headers['X-Amz-Date']\n request.headers['X-Amz-Date'] = request.context['timestamp']\n\n\nclass S3SigV4Auth(SigV4Auth):\n\n def _modify_request_before_signing(self, request):\n super(S3SigV4Auth, self)._modify_request_before_signing(request)\n if 'X-Amz-Content-SHA256' in request.headers:\n del request.headers['X-Amz-Content-SHA256']\n request.headers['X-Amz-Content-SHA256'] = self.payload(request)\n\n def _normalize_url_path(self, path):\n # For S3, we do not normalize the path.\n return path\n\n\nclass SigV4QueryAuth(SigV4Auth):\n DEFAULT_EXPIRES = 3600\n\n def __init__(self, credentials, service_name, region_name,\n expires=DEFAULT_EXPIRES):\n super(SigV4QueryAuth, self).__init__(credentials, service_name,\n region_name)\n self._expires = expires\n\n def _modify_request_before_signing(self, request):\n # Note that we're not including X-Amz-Signature.\n # From the docs: \"The Canonical Query String must include all the query\n # parameters from the preceding table except for X-Amz-Signature.\n signed_headers = self.signed_headers(self.headers_to_sign(request))\n auth_params = {\n 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',\n 'X-Amz-Credential': self.scope(request),\n 'X-Amz-Date': request.context['timestamp'],\n 'X-Amz-Expires': self._expires,\n 'X-Amz-SignedHeaders': signed_headers,\n }\n if self.credentials.token is not None:\n auth_params['X-Amz-Security-Token'] = self.credentials.token\n # Now parse the original query string to a dict, inject our new query\n # params, and serialize back to a query string.\n url_parts = urlsplit(request.url)\n # parse_qs makes each value a list, but in our case we know we won't\n # have repeated keys so we know we have single element lists which we\n # can convert back to scalar values.\n query_dict = dict(\n [(k, v[0]) for k, v in parse_qs(url_parts.query).items()])\n # The spec is particular about this. 
It *has* to be:\n # https://<endpoint>?<operation params>&<auth params>\n # You can't mix the two types of params together, i.e. just keep doing\n # new_query_params.update(op_params)\n # new_query_params.update(auth_params)\n # percent_encode_sequence(new_query_params)\n operation_params = ''\n if request.data:\n # We also need to move the body params into the query string.\n # request.data will be populated, for example, with query services\n # which normally form encode the params into the body.\n # This means that request.data is a dict() of the operation params.\n query_dict.update(request.data)\n request.data = ''\n if query_dict:\n operation_params = percent_encode_sequence(query_dict) + '&'\n new_query_string = (operation_params +\n percent_encode_sequence(auth_params))\n # url_parts is a tuple (and therefore immutable) so we need to create\n # a new url_parts with the new query string.\n # <part> - <index>\n # scheme - 0\n # netloc - 1\n # path - 2\n # query - 3 <-- we're replacing this.\n # fragment - 4\n p = url_parts\n new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])\n request.url = urlunsplit(new_url_parts)\n\n def _inject_signature_to_request(self, request, signature):\n # Rather than calculating an \"Authorization\" header, for the query\n # param auth, we just append an 'X-Amz-Signature' param to the end\n # of the query string.\n request.url += '&X-Amz-Signature=%s' % signature\n\n\nclass S3SigV4QueryAuth(SigV4QueryAuth):\n \"\"\"S3 SigV4 auth using query parameters.\n\n This signer will sign a request using query parameters and signature\n version 4, i.e. a \"presigned url\" signer.\n\n\n \"\"\"\n def _normalize_url_path(self, path):\n # For S3, we do not normalize the path.\n return path\n\n def payload(self, request):\n # From the doc link above:\n # \"You don't include a payload hash in the Canonical Request, because\n # when you create a presigned URL, you don't know anything about the\n # payload. 
Instead, you use a constant string \"UNSIGNED-PAYLOAD\".\n return \"UNSIGNED-PAYLOAD\"\n\n\nclass S3SigV4PostAuth(SigV4Auth):\n \"\"\"\n Presigns a s3 post\n\n \"\"\"\n def add_auth(self, request):\n datetime_now = datetime.datetime.utcnow()\n request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)\n\n fields = {}\n if request.context.get('s3-presign-post-fields', None) is not None:\n fields = request.context['s3-presign-post-fields']\n\n policy = {}\n conditions = []\n if request.context.get('s3-presign-post-policy', None) is not None:\n policy = request.context['s3-presign-post-policy']\n if policy.get('conditions', None) is not None:\n conditions = policy['conditions']\n\n policy['conditions'] = conditions\n\n fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'\n fields['x-amz-credential'] = self.scope(request)\n fields['x-amz-date'] = request.context['timestamp']\n\n conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})\n conditions.append({'x-amz-credential': self.scope(request)})\n conditions.append({'x-amz-date': request.context['timestamp']})\n\n if self.credentials.token is not None:\n fields['x-amz-security-token'] = self.credentials.token\n conditions.append({'x-amz-security-token': self.credentials.token})\n\n # Dump the base64 encoded policy into the fields dictionary.\n fields['policy'] = base64.b64encode(\n json.dumps(policy).encode('utf-8')).decode('utf-8')\n\n fields['x-amz-signature'] = self.signature(fields['policy'], request)\n\n request.context['s3-presign-post-fields'] = fields\n request.context['s3-presign-post-policy'] = policy\n\n\nAUTH_TYPE_MAPS = {\n 'v4': SigV4Auth,\n 'v4-query': SigV4QueryAuth,\n 's3v4': S3SigV4Auth,\n 's3v4-query': S3SigV4QueryAuth,\n 's3v4-presign-post': S3SigV4PostAuth,\n\n}\n\n\n\nif __name__ == '__main__':\n access_key = \"AKLTJZEjW05lQEGx1Z_g07AazA\"\n secret_key = \"OAcfe1+lkHucQoaVMUbIhlaDK2D8QuFMv4jHiRRtgtNqYVaEOWLv3MaRZAlk565hRg==\"\n credentials = Credentials(access_key, secret_key)\n v4 = SigV4Auth(credentials,\"iam\", \"cn-beijing-6\")\n request_dict = {'context': '', 'url': 'http://10.100.50.90', 'headers': {}, 'method': 'GET', 'params': '', 'body': ''}\n request = create_request_object(request_dict)\n print(v4.add_auth(request))\n", "sub_path": "Tools/AWSTEST.py", "file_name": "AWSTEST.py", "file_ext": "py", "file_size_in_byte": 37220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 58, "usage_type": "call"}, {"api_name": "kscore.compat.HTTPResponse", "line_number": 61, "usage_type": "name"}, {"api_name": "kscore.compat.HTTPResponse.__init__", "line_number": 66, "usage_type": "call"}, {"api_name": "kscore.compat.HTTPResponse", "line_number": 66, "usage_type": "name"}, {"api_name": "kscore.compat.HTTPResponse._read_status", "line_number": 74, "usage_type": "call"}, {"api_name": "kscore.compat.HTTPResponse", "line_number": 74, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 77, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection.__init__", "line_number": 90, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 90, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection.close", "line_number": 104, "usage_type": "call"}, {"api_name": 
"kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 104, "usage_type": "name"}, {"api_name": "sys.version_info", "line_number": 118, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection._tunnel", "line_number": 119, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 119, "usage_type": "name"}, {"api_name": "socket.error", "line_number": 133, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection._send_request", "line_number": 149, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 149, "usage_type": "name"}, {"api_name": "kscore.compat.six.text_type", "line_number": 160, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 160, "usage_type": "name"}, {"api_name": "select.select", "line_number": 183, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 246, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection.send", "line_number": 262, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 262, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.VerifiedHTTPSConnection", "line_number": 272, "usage_type": "name"}, {"api_name": "inspect.isfunction", "line_number": 280, "usage_type": "call"}, {"api_name": "kscore.utils.percent_encode_sequence", "line_number": 307, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 335, "usage_type": "call"}, {"api_name": "kscore.compat.urlunsplit", "line_number": 352, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models.RequestEncodingMixin", "line_number": 356, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models", "line_number": 356, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.models.Request", "line_number": 356, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models.Request.__init__", "line_number": 362, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models.Request", "line_number": 362, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models", "line_number": 362, "usage_type": "name"}, {"api_name": "kscore.compat.HTTPHeaders", "line_number": 363, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models.PreparedRequest", "line_number": 391, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models", "line_number": 391, "usage_type": "name"}, {"api_name": "kscore.compat.six.text_type", "line_number": 394, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 394, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.models.PreparedRequest", "line_number": 399, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models", "line_number": 399, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.sessions.REDIRECT_STATI", "line_number": 422, "usage_type": "name"}, {"api_name": "kscore.compat.six.text_type", "line_number": 436, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 436, "usage_type": "name"}, {"api_name": "kscore.compat.six.binary_type", "line_number": 437, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 437, 
"usage_type": "name"}, {"api_name": "kscore.exceptions.UnseekableStreamError", "line_number": 444, "usage_type": "call"}, {"api_name": "kscore.compat.json", "line_number": 448, "usage_type": "argument"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls", "line_number": 468, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPSConnectionPool", "line_number": 468, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPConnectionPool.ConnectionCls", "line_number": 469, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPConnectionPool", "line_number": 469, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 472, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 496, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 556, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 556, "usage_type": "argument"}, {"api_name": "hmac.new", "line_number": 558, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 558, "usage_type": "argument"}, {"api_name": "kscore.compat.HTTPHeaders", "line_number": 566, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 567, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 584, "usage_type": "call"}, {"api_name": "kscore.compat.quote", "line_number": 590, "usage_type": "call"}, {"api_name": "kscore.compat.quote", "line_number": 591, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 634, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 636, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 645, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 651, "usage_type": "call"}, {"api_name": "kscore.compat.quote", "line_number": 665, "usage_type": "call"}, {"api_name": "kscore.utils.normalize_url_path", "line_number": 665, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 693, "usage_type": "call"}, {"api_name": "kscore.exceptions.NoCredentialsError", "line_number": 707, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 708, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 708, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 746, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 746, "usage_type": "attribute"}, {"api_name": "email.utils.formatdate", "line_number": 748, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 749, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 796, "usage_type": "call"}, {"api_name": "kscore.compat.parse_qs", "line_number": 801, "usage_type": "call"}, {"api_name": "kscore.utils.percent_encode_sequence", "line_number": 817, "usage_type": "call"}, {"api_name": "kscore.utils.percent_encode_sequence", "line_number": 819, "usage_type": "call"}, {"api_name": "kscore.compat.urlunsplit", "line_number": 830, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 865, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 865, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 894, "usage_type": "call"}, {"api_name": "kscore.compat.json.dumps", "line_number": 895, "usage_type": "call"}, 
{"api_name": "kscore.compat.json", "line_number": 895, "usage_type": "name"}]} +{"seq_id": "131248341", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n\n# !/usr/bin/env python\n\n# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg\nfrom model.test import im_detect\nfrom model.test import im_detect_feat\n\nfrom layer_utils.roi_layers import nms\n\nfrom utils.timer import Timer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, cv2\nimport argparse\nimport json\n\nfrom nets.vgg16 import vgg16\nfrom nets.resnet_v1 import resnetv1, resnet101\nfrom multiprocessing import Process\n\nimport torch\n\nimport pdb\n\nCLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',\n 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',\n 'tvmonitor')\n\nNETS = {\n 'vgg16': ('vgg16_faster_rcnn_iter_%d.pth',),\n 'res101': ('res101_faster_rcnn_iter_%d.pth',)\n}\nDATASETS = {\n 'pascal_voc': ('voc_2007_trainval',),\n 'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)\n}\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\n\ndef vis_detections(im, class_name, dets, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1],\n fill=False,\n edgecolor='red',\n linewidth=3.5))\n ax.text(\n bbox[0],\n bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14,\n color='white')\n\n ax.set_title(\n ('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name, thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n\n\ndef demo(net, image_name):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(\n timer.total_time(), boxes.shape[0]))\n\n # Visualize detections for each class\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(\n torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),\n NMS_THRESH)\n dets = dets[keep.numpy(), :]\n vis_detections(im, cls, dets, thresh=CONF_THRESH)\n\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description='Tensorflow Faster R-CNN demo')\n parser.add_argument(\n '--net',\n dest='demo_net',\n help='Network to use [vgg16 
res101]',\n choices=NETS.keys(),\n default='res101')\n parser.add_argument(\n '--dataset',\n dest='dataset',\n help='Trained dataset [pascal_voc pascal_voc_0712]',\n choices=DATASETS.keys(),\n default='pascal_voc_0712')\n args = parser.parse_args()\n\n return args\n\n\ndef load_image_ids(split_name):\n ''' Load a list of (path, image_id) tuples. Modify this to suit your data locations. '''\n split = []\n base_dir = '/DATA/disk1/zhangming6/Datasets/AI_Challenger_2017/caption/raw_data/train_20170902'\n\n if split_name == 'coco_test2014':\n with open('/data/coco/annotations/image_info_test2014.json') as f:\n data = json.load(f)\n for item in data['images']:\n image_id = int(item['id'])\n filepath = os.path.join('/data/test2014/', item['file_name'])\n split.append((filepath, image_id))\n elif split_name == 'coco_test2015':\n with open('/data/coco/annotations/image_info_test2015.json') as f:\n data = json.load(f)\n for item in data['images']:\n image_id = int(item['id'])\n filepath = os.path.join('/data/test2015/', item['file_name'])\n split.append((filepath, image_id))\n elif split_name == 'genome':\n with open('/data/visualgenome/image_data.json') as f:\n for item in json.load(f):\n image_id = int(item['image_id'])\n filepath = os.path.join('/data/visualgenome/', item['url'].split('rak248/')[-1])\n split.append((filepath, image_id))\n elif split_name == 'chinese_train':\n with open(base_dir + '/caption_train_annotations_20170902.json') as f:\n for item in json.load(f):\n image_id = item['image_id']\n filepath = os.path.join(base_dir + '/caption_train_images_20170902', image_id)\n split.append((filepath, image_id))\n elif split_name == 'chinese_val':\n with open(base_dir + '/caption_validation_annotations_20170910.json') as f:\n for item in json.load(f):\n image_id = item['image_id']\n filepath = os.path.join(base_dir + '/caption_validation_images_20170910', image_id)\n split.append((filepath, image_id))\n elif split_name == 'chinese_test1':\n with open(base_dir + '/caption_test1_annotations_20170923.json') as f:\n for item in json.load(f):\n image_id = item['image_id']\n filepath = os.path.join(base_dir + '/caption_test1_images_20170923', image_id)\n split.append((filepath, image_id))\n else:\n print('Unknown split')\n return split\n\n\ndef feature_gen(net, image_name):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n im = cv2.imread(im_file)\n\n scores, boxes, pool5 = im_detect(net, im)\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(\n torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),\n NMS_THRESH)\n dets = dets[keep.numpy(), :]\n pool5_select = pool5[keep.numpy(), :]\n # path = os.path.abspath(os.path.dirname(__file__)+'/../data/test/')\n path = 'demo_res/'\n np.save(path + 'fc.npy', pool5_select.mean(0))\n np.savez_compressed(path + 'att.npz', feat=pool5_select)\n np.save(path + 'box.npy', dets)\n\n print('Done!')\n\n\ndef feature_gen_multi(net, image_list, outpath):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n count = 0\n sum = len(image_list)\n for img_file, img_id in image_list:\n im_file = os.path.join(img_file)\n im = cv2.imread(im_file)\n\n
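# im_detect is assumed here to be a patched variant that also returns the\n # pooled RoI features (cf. the im_detect_feat import at the top of the\n # file): scores has shape (num_boxes, num_classes), boxes has\n # (num_boxes, 4 * num_classes), and pool5 holds one feature vector per box.\n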
scores, boxes, pool5 = im_detect(net, im)\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(\n torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),\n NMS_THRESH)\n dets = dets[keep.numpy(), :]\n pool5_select = pool5[keep.numpy(), :]\n\n np.save(outpath + 'chinese_bu_fc/' + img_id + '.npy', pool5_select.mean(0))\n np.savez_compressed(outpath + 'chinese_bu_att/' + img_id + '.npz', feat=pool5_select)\n np.save(outpath + 'chinese_bu_box/' + img_id + '.npy', dets)\n\n count += 1\n if count % 100 == 0:\n print('{}/{}:{:.2f}%'.format(count, sum, (count / sum) * 100))\n\n print('Done!')\n\n\ndef single_img(net):\n im_names = [\n 'a2af7deaa01abca741477820bbf37b340df02a88.jpg'\n # 'test_wave.jpg'\n ]\n for im_name in im_names:\n print('*' * 26)\n print('Demo for data/demo/{}'.format(im_name))\n # demo(net, im_name)\n feature_gen(net, im_name)\n\n\ndef multi_img(net):\n split_num = 2\n image_ids = load_image_ids('chinese_train')\n # Split image ids between gpus\n image_ids_split = [image_ids[i::split_num] for i in range(split_num)]\n\n procs = []\n outfile = '/DATA/disk1/zhangming6/Datasets/AI_Challenger_2017/caption/bottom_up_zm/'\n\n multi_process = False\n if multi_process: # not usable yet\n for i in range(split_num):\n p = Process(target=feature_gen_multi,\n args=(net, image_ids_split[i], outfile))\n p.daemon = True\n p.start()\n procs.append(p)\n for p in procs:\n p.join()\n else:\n feature_gen_multi(net, image_ids, outfile)\n\n\nif __name__ == '__main__':\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n args = parse_args()\n\n # model path\n demonet = args.demo_net\n dataset = args.dataset\n saved_model = os.path.join(\n 'output', demonet, DATASETS[dataset][0], 'default',\n NETS[demonet][0] % (70000 if dataset == 'pascal_voc' else 110000))\n\n if not os.path.isfile(saved_model):\n raise IOError(\n ('{:s} not found.\\nDid you download the proper networks from '\n 'our server and place them properly?').format(saved_model))\n\n # load network\n\n if demonet == 'vgg16':\n net = vgg16()\n elif demonet == 'res101':\n net = resnetv1(num_layers=101)\n else:\n raise NotImplementedError\n net.create_architecture(21, tag='default', anchor_scales=[8, 16, 32])\n\n net.load_state_dict(\n torch.load(saved_model, map_location=lambda storage, loc: storage))\n\n # net = resnet101(True)\n\n net.eval()\n if not torch.cuda.is_available():\n net._device = 'cpu'\n net.to(net._device)\n\n print('Loaded network {:s}'.format(saved_model))\n\n # single_img(net)\n multi_img(net)\n", "sub_path": "tools/chinese_feature_gen.py", "file_name": "chinese_feature_gen.py", "file_ext": "py", "file_size_in_byte": 11110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 86, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.config.cfg.DATA_DIR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.config.cfg", "line_number": 95, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.timer.Timer", "line_number": 99, "usage_type": "call"}, {"api_name": "model.test.im_detect", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "layer_utils.roi_layers.nms", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 116, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 124, "usage_type": "call"}, {"api_name": "json.load", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "model.config.cfg.DATA_DIR", "line_number": 196, "usage_type": "attribute"}, {"api_name": "model.config.cfg", "line_number": 196, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 197, "usage_type": "call"}, {"api_name": "model.test.im_detect", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 208, "usage_type": "attribute"}, {"api_name": "layer_utils.roi_layers.nms", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.save", 
"line_number": 216, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 230, "usage_type": "call"}, {"api_name": "model.test.im_detect", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 241, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 241, "usage_type": "attribute"}, {"api_name": "layer_utils.roi_layers.nms", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 250, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 283, "usage_type": "call"}, {"api_name": "model.config.cfg.TEST", "line_number": 295, "usage_type": "attribute"}, {"api_name": "model.config.cfg", "line_number": 295, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 305, "usage_type": "call"}, {"api_name": "os.path", "line_number": 305, "usage_type": "attribute"}, {"api_name": "nets.vgg16.vgg16", "line_number": 313, "usage_type": "call"}, {"api_name": "nets.resnet_v1.resnetv1", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 321, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 326, "usage_type": "attribute"}]} +{"seq_id": "337235534", "text": "# This file is part of Indico.\n# Copyright (C) 2002 - 2019 CERN\n#\n# Indico is free software; you can redistribute it and/or\n# modify it under the terms of the MIT License; see the\n# LICENSE file for more details.\n\nfrom __future__ import unicode_literals\n\nfrom flask import render_template\n\nfrom indico.core.notifications import email_sender, make_email\n\n\n@email_sender\ndef notify_amount_inconsistency(registration, amount, currency):\n event = registration.registration_form.event\n to = event.creator.email\n body = render_template('events/payment/emails/payment_inconsistency_email_to_manager.txt',\n event=event, registration=registration, amount=amount, currency=currency)\n return make_email(to, subject='Payment inconsistency', body=body)\n", "sub_path": "indico/modules/events/payment/notifications.py", "file_name": "notifications.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "indico.core.notifications.make_email", "line_number": 21, "usage_type": "call"}, {"api_name": "indico.core.notifications.email_sender", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "313463747", "text": "from sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nimport winsound as ws\nimport numpy as np\n\nfrom utils import 
TrainModule\n\nnp_loader = np.load(\"TFSR_80_n_mfcc.npz\")\nx_data, y_data = np.expand_dims(np_loader[\"x_norm_data\"], axis=-1), to_categorical(np_loader[\"y_data\"])\nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, shuffle=True)\n\ntm_mfcc = TrainModule(result_file_name=\"TFSR_80_n_mfcc_training_result_conv2d\",\n input_shape=np.shape(x_train)[1:],\n output_shape=np.shape(y_train)[1]\n )\n\nmodel1 = tm_mfcc.create_conv2d_model()\n\nckpt_path = CHECKPOINT_PATH\nmodel_path = MODEL_SAVE_PATH\n\ntm_mfcc.training(\n model=model1,\n x_train=x_train,\n y_train=y_train,\n ckpt_path=ckpt_path,\n model_path=model_path,\n x_test=x_test,\n y_test=y_test\n)\n\nws.Beep(2000, 1000)\n", "sub_path": "training_mfcc_2d.py", "file_name": "training_mfcc_2d.py", "file_ext": "py", "file_size_in_byte": 926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.load", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.TrainModule", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 14, "usage_type": "call"}, {"api_name": "winsound.Beep", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "88179582", "text": "import torch.utils.data\n\nfrom vision3d.datasets import ModelNet40Dataset\nimport vision3d.transforms.functional as F\nfrom vision3d.utils.pytorch_utils import reset_numpy_random_seed\n\n\nclass TrainTransform(object):\n def __init__(self, num_point, sigma, low, high):\n self.num_point = num_point\n self.sigma = sigma\n self.low = low\n self.high = high\n\n def __call__(self, points):\n points = F.sample_point_cloud(points, self.num_point)\n points = F.random_shuffle_point_cloud(points)\n points = F.random_rescale_point_cloud(points, self.low, self.high)\n points = F.random_jitter_point_cloud(points, self.sigma)\n points = points.transpose()\n points = torch.tensor(points, dtype=torch.float)\n return points\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(\\n'\n format_string += ' SamplePointCloud(num_point={})\\n'.format(self.num_point)\n format_string += ' RandomShufflePointCloud()\\n'\n format_string += ' RandomRescalePointCloud(low={}, high={})\\n'.format(self.low, self.high)\n format_string += ' RandomJitterPointCloud(sigma={})\\n'.format(self.sigma)\n format_string += ')'\n return format_string\n\n\nclass TestTransform(object):\n def __init__(self, num_point):\n self.num_point = num_point\n\n def __call__(self, points):\n points = F.sample_point_cloud(points, self.num_point)\n points = points.transpose()\n points = torch.tensor(points, dtype=torch.float)\n return points\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(\\n'\n format_string += ' SamplePointCloud(num_point={})\\n'.format(self.num_point)\n format_string += ')'\n return format_string\n\n\ndef train_data_loader(config):\n train_transform = TrainTransform(config.train_num_point,\n config.train_jitter_sigma,\n config.train_rescale_low,\n config.train_rescale_high)\n train_dataset = ModelNet40Dataset(config.data_root, 'train', train_transform)\n train_loader = torch.utils.data.DataLoader(train_dataset,\n 
batch_size=config.train_batch_size,\n shuffle=True,\n num_workers=config.train_num_worker,\n pin_memory=True,\n drop_last=True,\n worker_init_fn=reset_numpy_random_seed)\n return train_loader\n\n\ndef test_data_loader(config):\n test_transform = TestTransform(config.test_num_point)\n test_dataset = ModelNet40Dataset(config.data_root, 'test', test_transform)\n test_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=config.test_batch_size,\n num_workers=config.test_num_worker,\n worker_init_fn=reset_numpy_random_seed)\n return test_loader\n\n\nif __name__ == '__main__':\n from config import config\n\n data_loader = train_data_loader(config)\n for i, (x, y) in enumerate(data_loader):\n print(i, ': ', x.shape, y.shape)\n\n data_loader = test_data_loader(config)\n for i, (x, y) in enumerate(data_loader):\n print(i, ': ', x.shape, y.shape)\n", "sub_path": "experiments/pointnet.modelnet40.resize+jitter.adam.tnet.smooth/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 3505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "vision3d.transforms.functional.sample_point_cloud", "line_number": 16, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 16, "usage_type": "name"}, {"api_name": "vision3d.transforms.functional.random_shuffle_point_cloud", "line_number": 17, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 17, "usage_type": "name"}, {"api_name": "vision3d.transforms.functional.random_rescale_point_cloud", "line_number": 18, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 18, "usage_type": "name"}, {"api_name": "vision3d.transforms.functional.random_jitter_point_cloud", "line_number": 19, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.utils.data.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.utils.data.float", "line_number": 21, "usage_type": "attribute"}, {"api_name": "vision3d.transforms.functional.sample_point_cloud", "line_number": 39, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.utils.data.tensor", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.utils.data.float", "line_number": 41, "usage_type": "attribute"}, {"api_name": "vision3d.datasets.ModelNet40Dataset", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 57, "usage_type": "name"}, {"api_name": "vision3d.utils.pytorch_utils.reset_numpy_random_seed", "line_number": 63, "usage_type": "name"}, {"api_name": "vision3d.datasets.ModelNet40Dataset", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 70, "usage_type": "name"}, {"api_name": "vision3d.utils.pytorch_utils.reset_numpy_random_seed", "line_number": 73, "usage_type": "name"}, 
{"api_name": "config.config", "line_number": 80, "usage_type": "argument"}, {"api_name": "config.config", "line_number": 84, "usage_type": "argument"}]} +{"seq_id": "96949522", "text": "from django.urls import path\nfrom .views.shift_views import (\n ShiftCreateView,\n ShiftDetailView,\n ShiftUpdateView,\n ShiftListView,\n)\nfrom .views.roster_view import(\n RosterCreateView,\n)\napp_name = 'eattendance'\nurlpatterns = [\n # shift urls\n path('shift/create/', ShiftCreateView.as_view(), name='shift-create'),\n path('shift//', ShiftDetailView.as_view(), name='shift-detail'),\n path('shift//update', ShiftUpdateView.as_view(),name='shift-update'),\n path('shifts/', ShiftListView.as_view(), name='shifts'),\n # roster urls\n path('roster/create/', RosterCreateView.as_view(), name='roster-create'),\n]", "sub_path": "Swasthya/eattendance/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftCreateView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftCreateView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftDetailView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftDetailView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftUpdateView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftUpdateView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftListView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftListView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.roster_view.RosterCreateView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.roster_view.RosterCreateView", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "230274000", "text": "import pika\n\nEXCHANGE_NAME = 'test-exchange'\nQUEUE_NAME = 'test-queue'\n\ndef init_rabbitmq():\n conn = pika.BlockingConnection()\n chan = conn.channel()\n\n chan.exchange_declare(EXCHANGE_NAME, 'direct')\n chan.queue_declare(QUEUE_NAME, durable=True)\n chan.queue_bind(QUEUE_NAME, EXCHANGE_NAME, \"routing.key\")\n\n conn.close()\n\n\nclass Producer(object):\n def __init__(self, conn):\n self.conn = conn\n\n def send_message(self, msg, exch, rtg_key):\n chan = self.conn.channel()\n chan.basic_publish(exch, rtg_key, msg)\n chan.close()\n\n\nclass Consumer(object):\n def __init__(self, conn):\n self.conn = conn\n\n def get_message(self, queue):\n chan = self.conn.channel()\n frame, _, body = chan.basic_get(queue)\n if frame:\n chan.basic_ack(frame.delivery_tag)\n return body\n\n\ndef hello_world():\n init_rabbitmq()\n\n conn = pika.BlockingConnection()\n\n p = Producer(conn)\n\n p.send_message(\"Hello world!\", EXCHANGE_NAME, 'routing.key')\n c = Consumer(conn)\n\n print(c.get_message(QUEUE_NAME))\n\n\nif __name__ == \"__main__\":\n hello_world()\n", "sub_path": "example.py", "file_name": "example.py", "file_ext": "py", 
"file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pika.BlockingConnection", "line_number": 7, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "643500346", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n## Import the related Libraries\nimport numpy as np\nimport statsmodels.api as sm ## OLS\nimport pandas as pd\nfrom scipy import stats\nfrom random import sample\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport sys\n\n## Load the data\nrawdata = np.genfromtxt(sys.argv[1], skip_header=1)\nX = rawdata[:, :-1]\ny = rawdata[:, -1]\n\ndef MLR(data, flag):\n X = data[:, :-1]\n y = data[:, -1]\n if flag == 1:\n ###### Ordinary least squares ######\n X2 = sm.add_constant(X) # for intercept\n est = sm.OLS(y, X2)\n est2 = est.fit()\n print(est2.summary())\n else:\n ###### Sklearn Linear regression ######\n Reg = LinearRegression()\n Reg.fit(X, y)\n params = np.append(Reg.intercept_, Reg.coef_)\n y_hat = Reg.predict(X)\n newX = np.append(np.ones((len(X), 1)), X, axis = 1)\n\n ## including intercept for matrix calculation\n MSE = (sum((y - y_hat)**2)) / (len(newX)-len(newX[0]))\n\n var_beta = MSE * (np.linalg.inv(np.dot(newX.T, newX)).diagonal())\n s_beta = np.sqrt(var_beta)\n t_beta = params / s_beta\n\n p_values = [2 * (1 - stats.t.cdf(np.abs(t), ( len(newX) - len(newX[0]) - 1))) for t in t_beta]\n\n # 반올림 작업.\n sd_b = np.round(s_beta, 3) ## Std.Errors of Coefficient\n ts_b = np.round(t_beta, 3) ## t-value\n p_values = np.round(p_values, 6) ## P-value\n params = np.round(params, 4) ## Coefficients\n\n R_squared = r2_score(y, y_hat)\n\n # Result table\n Result = pd.DataFrame()\n Result[\"Coefficients\"], Result[\"Std Error\"], Result[\"t values\"], Result[\"P-value\"], Result[\"MSE\"], Result[\"R-squared\"] = [params, sd_b, ts_b, p_values, MSE, R_squared]\n print(Result)\n return None\n\nMLR(X, int(sys.argv[2]))\n", "sub_path": "Exercise-6/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.genfromtxt", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "statsmodels.api.add_constant", "line_number": 24, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 24, "usage_type": "name"}, {"api_name": "statsmodels.api.OLS", "line_number": 25, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.stats.t.cdf", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.stats.t", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 43, 
"usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "546804568", "text": "# Dieses Skript Tauscht den HTML Code zwischen Orignal Anfrage und Proxy Abfrage aus\n# Skript in Kombination mit Firefox verwenden\n# Wir müssen jedoch noch die Proxy Einstellungen im Browser anpassen: Manuell Proxy Config \"127.0.0.1\", 7654\n# damit unsere Anfrage auf unseren Proxy Server umgeleitet wird\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\nfrom socketserver import ThreadingMixIn # Für Verbesserung der Perfomance\n\nimport requests\nimport random\nimport urllib\n\n# Vererbung:Nimm gesamten Rahmen von BaseHTTPReqestHandler aber tausche ein paar Sachen aus\nclass MyRequestHandler(BaseHTTPRequestHandler):\n\n def do_POST(self):\n print(self.path)\n print(self.headers) # Header wenn der Browser dem Proxy Daten schickt\n if self.headers[\"content-type\"] == \"application/x-www-form-urlencoded\": # Wenne es sich um Typ Formular handelt\n length = int(self.headers[\"content-length\"]) # Länge des Formularinhalts als integer ermitteln\n print(length)\n read_form_raw = str(self.rfile.read(length), \"utf-8\") # Formulardaten lesen (raw)\n data = urllib.parse.parse_qs(read_form_raw) # Raw Formular zerlegen in strukturierte Form (dict) umwandeln\n\n with requests.post(self.path, data=data, stream=True) as res: # Schicke Post Requests an Server mit Formulardaten data ,welche wir gerade eben auzsgelesen haben\n\n self.send_response(res.status_code) # ABSOLUT NOTWENDIGE ZEILE. Statuscode muss immer an Browser mitgeteilt werden. Weiterleiten den Angefragten Pfades vom Broswser\n # Headers 1 zu 1 an Browser weiterleiten\n #print(res.headers) # res.headers ist ein Dictionary\n for key, value in res.headers.items(): # Auflösung Dictionary\n self.send_header(key, value)\n self.end_headers()\n\n # Informationen an unseren Browser schicken. 
data = urllib.parse.parse_qs(read_form_raw) # split the raw form data into a structured form (dict)\n\n with requests.post(self.path, data=data, stream=True) as res: # send a POST request to the target server with the form data we just read\n\n self.send_response(res.status_code) # ABSOLUTELY REQUIRED LINE. The status code must always be passed back to the browser.\n # forward the headers to the browser one-to-one\n #print(res.headers) # res.headers is a dictionary\n for key, value in res.headers.items(): # unpack the dictionary\n self.send_header(key, value)\n self.end_headers()\n\n # Send the information to our browser. Only bytes can go over the wire, so the raw stream is passed through\n self.wfile.write(res.raw.read()) # forward the raw data sent by the site to the browser\n\n def do_GET(self):\n\n if self.path[-4:] == \".jpg\": # only for this file extension\n\n # serve a different image instead\n self.send_response(200)\n self.send_header(\"Content-Type\", \"image/jpeg\")\n self.end_headers()\n\n images = [\"./Bilder/1.jpg\", \"./Bilder/2.jpg\"]\n\n with open(random.choice(images), \"rb\") as file:\n self.wfile.write(file.read())\n\n else:\n # The indentation is required: the with-block avoids a memory leak and keeps the extra stream attribute on res so the raw data can be accessed\n with requests.get(self.path, stream=True) as res: # download the requested path\n\n self.send_response(res.status_code) # pass the status code for the requested path back to the browser\n\n print(res.headers) # res.headers -> the original server's headers as seen by the proxy (a dictionary)\n if \"text/html\" in res.headers[\"content-type\"]: # only if it is an HTML document\n self.send_header(\"Content-Type\", \"text/html\") # refers to the headers our proxy sends back to the browser\n self.end_headers()\n print(res.content) # holds the original content the server answered with\n content = str(res.content, \"utf-8\") # work on it internally as a utf-8 string\n
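# This is the actual MITM payload swap: every occurrence of \"Bilder\" in\n # the page source becomes \"Katzenbilder\", so image links now point at\n # paths that the .jpg branch in do_GET above answers with a random local\n # file. Only Content-Type is forwarded in this branch, since the original\n # Content-Length would no longer match the rewritten body.\n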
Geht nur in Byteform -> Daher wird Str encoded in Bytes\n self.wfile.write(res.raw.read()) # Gibt die Rohdaten die von der Seite gesendet wurden weiter an Browser\n\n# Optimierung -> Kombination aus ThreadMixIn, HTTPServer (Mehrfachvererbung)\nclass ThreadingHTTPServer(ThreadingMixIn, HTTPServer): #\n pass\n\naddress = (\"127.0.0.1\", 7654) # IP Adresse (entsprechend dem Computer auf dem der Server läuft) und Port -> http://127.0.0.1:7654\n\nserver = ThreadingHTTPServer(address, MyRequestHandler) # ThreadingHTTP Server Adresse zuweisen, und verhalte dich entsprechend MyRequestHandler\nserver.serve_forever() # Server Starten und halte diesen am laufen", "sub_path": "01_Tutorials/Udemy Kurs Ehical Hacking/09_Praxis_MITM_mit_HTTP-Proxy/105_HTTP_Proxy_Server_Fomular_auslesen.py", "file_name": "105_HTTP_Proxy_Server_Fomular_auslesen.py", "file_ext": "py", "file_size_in_byte": 4838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 14, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qs", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 23, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "socketserver.ThreadingMixIn", "line_number": 77, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "463870255", "text": "from django.db import IntegrityError\n\nfrom rest_framework import generics, status, views\nfrom rest_framework.response import Response\n\nfrom videos.models import Video\nfrom videos.permissions import VideoViewPermissions\nfrom videos.serializers import VideoSerializer, CreateVideoSerializer\n\nfrom eswrapper.mixins import ESPaginationMixin\n\n\nclass ListVideos(generics.ListCreateAPIView):\n\n queryset = Video.objects.all()\n serializer_class = VideoSerializer\n permission_classes = (VideoViewPermissions, )\n\n def post(self, request, *args, **kwargs):\n serializer = CreateVideoSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(status=status.HTTP_400_BAD_REQUEST)\n try:\n v = Video.objects.create(**serializer.data)\n return Response(VideoSerializer(v).data, status=status.HTTP_201_CREATED)\n except IntegrityError:\n return Response(status=status.HTTP_409_CONFLICT)\n\n\nclass VideoDetail(generics.RetrieveUpdateDestroyAPIView):\n\n queryset = Video.objects.all()\n serializer_class = VideoSerializer\n permission_classes = (VideoViewPermissions, )\n lookup_url_kwarg = 'video_pk'\n\n\nclass ESVideoList(ESPaginationMixin, views.APIView):\n\n def get(self, request, *args, **kwargs):\n qs = Video.es_objects.all()\n resp = self.esresp(Video.objects.count(), qs)\n return Response(resp, status=status.HTTP_200_OK)\n", "sub_path": "trickapi/api/video_api.py", "file_name": "video_api.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 13, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": 
"videos.models.Video.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 15, "usage_type": "name"}, {"api_name": "videos.serializers.VideoSerializer", "line_number": 16, "usage_type": "name"}, {"api_name": "videos.permissions.VideoViewPermissions", "line_number": 17, "usage_type": "name"}, {"api_name": "videos.serializers.CreateVideoSerializer", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.create", "line_number": 24, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 25, "usage_type": "call"}, {"api_name": "videos.serializers.VideoSerializer", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 30, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.all", "line_number": 32, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 32, "usage_type": "name"}, {"api_name": "videos.serializers.VideoSerializer", "line_number": 33, "usage_type": "name"}, {"api_name": "videos.permissions.VideoViewPermissions", "line_number": 34, "usage_type": "name"}, {"api_name": "eswrapper.mixins.ESPaginationMixin", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.views", "line_number": 38, "usage_type": "name"}, {"api_name": "videos.models.Video.es_objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "videos.models.Video.es_objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 41, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.count", "line_number": 42, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 43, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "237869222", "text": "#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 
vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\n/***************************************************************************\n *\n * Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved\n * @file: seg_predict_cpu.py\n * @date 2021/5/8 2:28 PM\n * @brief \n *\n **************************************************************************/\n\"\"\"\nimport paddlehub as hub\nimport cv2\nimport os\nimport shutil\n\npwd = os.getcwd()\nmodels_save = os.path.join(pwd, 'models_save')\npwd_last = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\nimg_data = os.path.join(pwd_last, 'img_data')\nresults = os.path.join(pwd, 'results')\nif os.path.exists(results):\n    shutil.rmtree(results)\n\npic_list = ['car.jpeg', 'det_03.jpg', 'small_bike.jpg', 'det_02.jpeg']\n\nfor pic in pic_list:\n    model = hub.Module(\n        name='ocrnet_hrnetw18_voc',\n        pretrained=os.path.join(models_save, 'ocrnet_hrnetw18_voc', 'epoch_2',\n                                'model.pdparams'))\n    img = cv2.imread(os.path.join(img_data, pic))\n    model.predict(images=[img], visualization=True, save_path=results)\n\nassert len(os.listdir(os.path.join(results, 'image'))) == len(pic_list)\nassert len(os.listdir(os.path.join(results, 'mask'))) == len(pic_list)\n", "sub_path": "ce_cloud_models/PaddleHub/SEG/linux/scripts/hub_ocrnet_hrnetw18_voc/seg_predict_cpu.py", "file_name": "seg_predict_cpu.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.getcwd", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 25, "usage_type": "call"}, {"api_name": "paddlehub.Module", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}]}
+{"seq_id": "614184426", "text": "# https://www.acmicpc.net/problem/2178\r\n# Maze search\r\n\r\n'''\r\nA key property of BFS is that it visits every vertex along a shortest path.\r\n'''\r\nimport sys\r\nfrom collections import deque\r\n\r\ndef bfs(N, M, ones):\r\n    dist = [[0] * 
M for _ in range(N)]\r\n    queue = deque()\r\n    check = []\r\n\r\n    queue.append(ones[0])\r\n\r\n    while queue:\r\n        node = queue.popleft()\r\n        r, c = node\r\n\r\n        if r == N - 1 and c == M - 1:\r\n            return dist[N - 1][M - 1]\r\n\r\n        if (r + 1, c) in ones and (r + 1, c) not in check:\r\n            queue.append((r + 1, c))\r\n            check.append((r + 1, c))\r\n            dist[r + 1][c] = dist[r][c] + 1\r\n\r\n        if (r - 1, c) in ones and (r - 1, c) not in check:\r\n            queue.append((r - 1, c))\r\n            check.append((r - 1, c))\r\n            dist[r - 1][c] = dist[r][c] + 1\r\n\r\n        if (r, c + 1) in ones and (r, c + 1) not in check:\r\n            queue.append((r, c + 1))\r\n            check.append((r, c + 1))\r\n            dist[r][c + 1] = dist[r][c] + 1\r\n\r\n        if (r, c - 1) in ones and (r, c - 1) not in check:\r\n            queue.append((r, c - 1))\r\n            check.append((r, c - 1))\r\n            dist[r][c - 1] = dist[r][c] + 1\r\n\r\n    return dist[N - 1][M - 1]\r\n\r\n\r\nN, M = map(int, sys.stdin.readline().split())\r\n\r\nmat = []\r\nones = []\r\nfor i in range(N):\r\n    tmp = list(sys.stdin.readline().strip())\r\n    line = list(map(int, tmp))\r\n    mat.append(line)\r\n    for j in range(len(line)):\r\n        if line[j] == 1:\r\n            ones.append((i, j))\r\n\r\n\r\nprint(bfs(N, M, ones) + 1)", "sub_path": "DFS_BFS/2178.py", "file_name": "2178.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 52, "usage_type": "attribute"}]}
+{"seq_id": "332053206", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/mad/Documents/spike/spike/Algo/Cadzow_mpi2.py\n# Compiled at: 2017-08-31 16:40:33\n# Size of source mod 2**32: 7772 bytes\n\"\"\"\nCreated by Marc-André Delsuc and Lionel Chiron on 2011-07\nCopyright (c) 2010 IGBMC. 
All rights reserved.\n\nCadzow in MPI mode\ncomplete rewrite from the code from Cyrille Bonamy for the MPI part\n\ncode compatible with version 0.4.0 of NPK\n\nThresholding to make Cadzow on the main relevant columns.\n\nnote that the cadzow algo is multithreaded if running over the MKL library.\nSo if MKL is installed, run only one instance per node, as all cores from the node will be solicited.\n\"\"\"\nfrom __future__ import print_function\nimport sys, numpy as np\nimport util.mpiutil as mpiutil\nimport util.progressbar as pg\nimport tables, time, urQRd, Cadzow\nfrom spike.NPKData import NPKData, copyaxes\nfrom spike.FTICR import FTICRData\nimport spike.File.HDF5File as HDF5File\nimport spike.NPKConfigParser as NPKConfigParser\ndebug = False\n\ndef Set_Table_Param():\n    tables.parameters.CHUNK_CACHE_PREEMPT = 1\n    tables.parameters.CHUNK_CACHE_SIZE = 104857600\n    tables.parameters.METADATA_CACHE_SIZE = 104857600\n    tables.parameters.NODE_CACHE_SLOTS = 104857600\n\n\ndef selectcol(data, limitpts, nbrows=200):\n    \"\"\"\n    returns a list of indices of the limitpts largest columns of the 2D 'data'\n\n    first averaging on nbrows rows\n\n    return index list\n    \"\"\"\n    if debug:\n        print('averaging on ', nbrows, ' rows ')\n    else:\n        roughft2 = data.row(0)\n        if roughft2.axis1.itype == 1:\n            roughft2.modulus()\n        else:\n            roughft2.abs()\n        for i in range(min(nbrows, data.size1)):\n            rr = data.row(i)\n            if rr.axis1.itype == 1:\n                rr.modulus()\n            else:\n                rr.abs()\n            roughft2.add(rr)\n\n        roughft2.mult(1.0 / nbrows)\n        n = int(roughft2.size1 * 0.1)\n        roughft2.buffer[0:n] = 0.0\n        index = find_thres(roughft2, limitpts=limitpts)\n        if debug:\n            roughft2.display()\n            disp = NPKData(buffer=(np.zeros(roughft2.size1)))\n            disp.buffer[index] = roughft2.buffer[index]\n            disp.display(show=True)\n        return index\n\n\ndef find_thres(b, limitpts):\n    \"\"\"\n    returns a list of indices of the limitpts largest points in the 1D data 'b' \n    \"\"\"\n    thresh = max(b.buffer) + 1.0\n    nbpts = 0\n    count = 0\n    inter = b.buffer.copy()\n    while abs(nbpts - limitpts) / float(limitpts) > 0.1:\n        if debug:\n            print('thresh : ', thresh)\n        else:\n            nbpts = (inter > thresh).sum()\n            inter[inter < thresh] = 0\n            if debug:\n                print('nbpts', nbpts, 'count ', count)\n            count += 1\n            if nbpts < limitpts:\n                c = inter\n                threshold = thresh\n                if debug:\n                    print('threshold ', threshold)\n                thresh /= 2.0\n                ind = np.where(c > 0)[0]\n            else:\n                if debug:\n                    print('threshold min = ', thresh)\n                thresh = (threshold + thresh) / 2.0\n            if debug:\n                print('new threshold ', thresh)\n            inter = np.copy(b.buffer)\n            if debug:\n                print('above thresh ', (inter > thresh).sum())\n    if debug:\n        print('=0 ', (inter == 0).sum())\n\n    return ind\n\n\ndef load_input(name):\n    \"\"\"load input file and returns it, in read-only mode\"\"\"\n    if debug > 0:\n        print('reading', name)\n    hf = HDF5File(name, 'r')\n    d0 = hf.load()\n    return d0\n\n\ndef iterarg(xindex, dinp, n_of_line, n_of_iter, orda):\n    \"\"\"an iterator used by the MPI set-up\"\"\"\n    for i in xindex:\n        c0 = dinp.col(i)\n        if debug:\n            print(c0.buffer, n_of_line, n_of_iter, orda)\n        yield (\n         c0.buffer, n_of_line, n_of_iter, orda)\n\n\ndef cadz(args):\n    \"\"\"utility function\"\"\"\n    if debug:\n        print(args)\n    return (Cadzow.cadzow)(*args)\n\n\ndef rqr(args):\n    \"\"\"utility function\"\"\"\n    if debug:\n        print(args)\n    argu = (\n     args[0], args[1], args[3])\n    return (urQRd.urQRd)(*argu)\n\n\ndef main():\n    \"\"\"does the whole job,\n    if we are running in MPI, this is only called by job #0\n    all other jobs are running mpi.slave()\n    \"\"\"\n    argv = sys.argv\n    if len(argv) != 2:\n        print('\\nsyntax is :\\n(mpirun -np N) python program configfile.mscf\\n')\n        sys.exit(1)\n    else:\n        configfile = argv[1]\n    cp = NPKConfigParser()\n    cp.readfp(open(configfile))\n    infile = cp.getword('Cadzow', 'namein')\n    print('infile', infile)\n    outfile = cp.getword('Cadzow', 'nameout')\n    print('outfile', outfile)\n    algo = cp.getword('Cadzow', 'algorithm')\n    print('algorithm', algo)\n    n_of_line = cp.getint('Cadzow', 'n_of_lines', 70)\n    print('n_of_line', n_of_line)\n    n_of_iter = cp.getint('Cadzow', 'n_of_iters', 1)\n    print('n_of_iter', n_of_iter)\n    orda = cp.getint('Cadzow', 'order', 500)\n    print('order', orda)\n    n_of_column = cp.getint('Cadzow', 'n_of_column', 100)\n    print('n_of_column', n_of_column)\n    progress = cp.getboolean('Cadzow', 'progress', True)\n    d0 = load_input(infile)\n    d0.check2D()\n    Set_Table_Param()\n    hfar = HDF5File(outfile, 'w', debug=0)\n    d1 = FTICRData(dim=2)\n    copyaxes(d0, d1)\n    group = 'resol1'\n    hfar.create_from_template(d1, group)\n    if n_of_column == 0:\n        indexes = range(d0.size2)\n    else:\n        indexes = selectcol(d0, n_of_column)\n    if algo == 'Cadzow':\n        meth = cadz\n    else:\n        if algo == 'rQRd':\n            meth = rqr\n        else:\n            raise ValueError('wrong algo')\n    t0 = time.time()\n    if progress:\n        widgets = [\n         'Processing %s: ' % algo, pg.Percentage(), ' ', pg.Bar(marker='-', left='[', right=']'), pg.ETA()]\n        pbar = pg.ProgressBar(widgets=widgets, maxval=(len(indexes)))\n    d1D = d0.col(0)\n    xarg = iterarg(indexes, d0, n_of_line, n_of_iter, orda)\n    if mpiutil.MPI_size > 1:\n        mpiutil.mprint('MPI Master job - starting slave jobs - ')\n        res = mpiutil.enum_imap(meth, xarg)\n        for i, p in res:\n            d1D.buffer = p\n            d1.set_col(indexes[i], d1D)\n            if progress:\n                pbar.update(i + 1)\n\n    else:\n        import itertools\n        res = itertools.imap(meth, xarg)\n        for i, p in enumerate(res):\n            d1D.buffer = p\n            d1.set_col(indexes[i], d1D)\n            if progress:\n                pbar.update(i + 1)\n\n    print('Processing time : ', time.time() - t0)\n\n\nif __name__ == '__main__':\n    if mpiutil.MPI_size < 2:\n        print('Running in single processor mode')\n        main()\n    else:\n        print('Running in MPI mode')\n        if mpiutil.MPI_rank == 0:\n            main()\n        else:\n            mpiutil.slave()", "sub_path": "pycfiles/spike_py-0.99.15.tar/Cadzow_mpi2.cpython-37.py", "file_name": "Cadzow_mpi2.cpython-37.py", "file_ext": "py", "file_size_in_byte": 7030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tables.parameters", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tables.parameters", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tables.parameters", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tables.parameters", "line_number": 37, "usage_type": "attribute"}, {"api_name": "spike.NPKData.NPKData", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 106, "usage_type": "call"}, {"api_name": "spike.File.HDF5File", "line_number": 119, "usage_type": "call"}, {"api_name": "Cadzow.cadzow", "line_number": 138, "usage_type": "call"}, {"api_name": "urQRd.urQRd", "line_number": 147, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 158, "usage_type": "call"}, {"api_name": "spike.NPKConfigParser", "line_number": 161, "usage_type": "call"}, {"api_name": "spike.File.HDF5File", "line_number": 181, "usage_type": "call"}, {"api_name": "spike.FTICR.FTICRData", "line_number": 182, "usage_type": "call"}, {"api_name": "spike.NPKData.copyaxes", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 197, "usage_type": "call"}, {"api_name": "util.progressbar.Percentage", "line_number": 200, "usage_type": "call"}, {"api_name": "util.progressbar", "line_number": 200, "usage_type": "name"}, {"api_name": "util.progressbar.Bar", "line_number": 200, "usage_type": "call"}, {"api_name": "util.progressbar.ETA", "line_number": 200, "usage_type": "call"}, {"api_name": "util.progressbar.ProgressBar", "line_number": 201, "usage_type": "call"}, {"api_name": "util.progressbar", "line_number": 201, "usage_type": "name"}, {"api_name": "util.mpiutil.MPI_size", "line_number": 204, "usage_type": "attribute"}, {"api_name": "util.mpiutil", "line_number": 204, "usage_type": "name"}, {"api_name": "util.mpiutil.mprint", "line_number": 205, "usage_type": "call"}, {"api_name": "util.mpiutil", "line_number": 205, "usage_type": "name"}, {"api_name": "util.mpiutil.enum_imap", "line_number": 206, "usage_type": "call"}, {"api_name": "util.mpiutil", "line_number": 206, "usage_type": "name"}, {"api_name": "itertools.imap", "line_number": 215, "usage_type": "call"}, {"api_name": "time.time", "line_number": 222, "usage_type": "call"}, {"api_name": "util.mpiutil.MPI_size", "line_number": 226, "usage_type": "attribute"}, {"api_name": "util.mpiutil", "line_number": 226, "usage_type": "name"}, {"api_name": "util.mpiutil.MPI_rank", "line_number": 231, "usage_type": "attribute"}, {"api_name": "util.mpiutil", "line_number": 231, "usage_type": "name"}, {"api_name": "util.mpiutil.slave", "line_number": 234, "usage_type": "call"}, {"api_name": "util.mpiutil", "line_number": 234, "usage_type": "name"}]}
+{"seq_id": "254738972", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom monitoring_app.models import CompetitorProduct\nimport random\nfrom decimal import Decimal\n\n\ndef get_html(url):\n    user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36'\n    r = requests.get(url, headers={'User-Agent': user_agent})\n    if r.ok:\n        return r.text\n    print(r.status_code)\n\n\ndef refined(s):\n    s = s.replace('\\t', '').replace('\\n', '').replace('\\r', '')\n    return s\n\n\ndef get_page_data(html):\n    data_list = []\n    soup = BeautifulSoup(html, 'lxml')\n    divs = soup.find_all('a', class_=\"sel-product-tile-title\")\n\n    for div in divs:\n        url = 'https://www.mvideo.ru' + div.get('href')\n        products = div.get('data-product-info').split('{')[1::2]\n\n        for product in products:\n            refined_product = refined(product)\n            p = '{' + refined_product\n\n            d = eval(p)\n\n            id_product = d.get('productId')\n            name = d.get('productName')\n            price = d.get('productPriceLocal')\n            categoryId = d.get('productCategoryId')\n            categoryName = d.get('productCategoryName')\n            vendorName = d.get('productVendorName')\n            groupId = d.get('productGroupId')\n            shop = 'М.видео'\n\n            data = {'id_product': id_product,\n                    'name': name,\n                    # price generated with a randomizer, used to build a sample database of MY OWN products\n                    # 'price': float(price) + round(random.uniform(-1, 1)*400)*5,\n                    'price': price,\n                    'categoryId': categoryId,\n                    'categoryName': categoryName,\n                    'vendorName': vendorName.lower().title(),\n                    'groupId': groupId,\n                    'url': url,\n                    'shop': shop}\n\n            print(data)\n            data_list.append(data)\n    return data_list\n\n\ndef write_db(competitor_products):\n    meta = {'updated_count': 0, 'created_count': 0}\n    urls = [competitor_product.get('url') for competitor_product in competitor_products if\n            competitor_product.get('url')]\n    CompetitorProduct.objects.filter(url__in=urls).update(status=False)\n\n    for competitor_product in competitor_products:\n        url = competitor_product.get('url')\n        if url:\n            price = Decimal(competitor_product.get('price'))\n            id_product = int(competitor_product.get('id_product'))\n            categoryId = competitor_product.get('categoryId')\n            categoryName = competitor_product.get('categoryName')\n            vendorName = competitor_product.get('vendorName')\n            groupId = competitor_product.get('groupId')\n            shop = competitor_product.get('shop')\n            name = competitor_product.get('name')\n\n            _, created = CompetitorProduct.objects.update_or_create(url=url, defaults={'id_product': id_product,\n                                                                                       'name': name,\n                                                                                       'price': price,\n                                                                                       'categoryId': categoryId,\n                                                                                       'categoryName': categoryName,\n                                                                                       'vendorName': vendorName,\n                                                                                       'groupId': groupId,\n                                                                                       'status': True,\n                                                                                       'shop': shop})\n            if created:\n                meta['created_count'] += 1\n            else:\n                meta['updated_count'] += 1\n    return meta\n\n\ndef mvideo(url_target, page_count):\n    pattern = url_target + '/f/page={}'\n    for i in range(1, int(page_count) + 1):\n        url = pattern.format(str(i))\n        html = get_html(url)\n        product_list = get_page_data(html)\n        write_db(product_list)\n        product_count_on_page = len(product_list)\n        print(\"-\" * 42 + \"\\nPage number {} returned {} products\".format(i,\n                                                                        product_count_on_page) + \"\\n\" + \"-\" * 42)\n        meta = write_db(product_list)\n        print(f'--> {i}: {meta}')\n    all_product_count = int(product_count_on_page) * int(page_count)\n    print(\"-\" * 42 + \"\\nIn total, page {} returned {} products\".format(url_target,\n                                                                       all_product_count) + \"\\n\" + \"-\" * 42)\n", "sub_path": "app/monitoring_app/parsers/mvideo.py", "file_name": "mvideo.py", "file_ext": "py", "file_size_in_byte": 4937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects.filter", "line_number": 66, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "monitoring_app.models.CompetitorProduct", "line_number": 66, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 71, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects.update_or_create", "line_number": 80, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "monitoring_app.models.CompetitorProduct", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "570154954", "text": "import matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sys import exit\nsns.set_style('white')\nsns.set_context('paper')\n# Plot adjustments:\nplt.rcParams.update({'ytick.labelsize': 28})\nplt.rcParams.update({'xtick.labelsize': 28})\nplt.rcParams.update({'axes.labelsize': 45})\nplt.rcParams.update({'legend.fontsize': 36})\nplt.rcParams.update({'axes.titlesize':50})\nplt.rcParams.update({'axes.grid': False})\n\nregression_data = ['52_log_GFP_0.01_LOO.txt',\n                   '52_log_sum_ratio_0.019_LOO.txt',\n                   'lin_log_mKate_0.019_LOO.txt']\nclass_data = 
['2016-06-22__GFP_above_parent_SEStructure_LOO.txt',\n '2016-06-22__sum_ratio_above_parent_SEStructure_LOO.txt',\n '2016-06-22__mKate_above_parent_structure_LOO.txt',\n]\nfile_names = ['GFP',\n 'sum_ratio',\n 'mKate']\nnames = ['localization',\n 'localization efficiency',\n 'expression']\nvalidations = [('log_GFP_52_0.01.txt', 'GFP_above_SEStructure.txt'),\n ('log_sum_ratio_52_0.019.txt', 'sum_ratio_above_SEStructure.txt'),\n ('log_mKate_lin_0.019.txt', 'mKate_above_structure.txt',)]\nys = ['log_GFP',\n 'log_sum_ratio',\n 'log_mKate']\n\nroot = '../../Programming Tools/Twist Project/'\ndata_folder = root + '2016-06-22/models/'\nvalidation_folder = root + '2016-06-22/validation/'\nplot_folder = 'plots/'\nparents = {'cschrimson':sns.xkcd_rgb['pale red'],\n 'c1c2':sns.xkcd_rgb['medium green'],\n 'cheriff':sns.xkcd_rgb['denim blue']}\nparent_names = ['cschrimson', 'c1c2', 'cheriff']\n\nformatter = mtick.FormatStrFormatter('%.0f')\n\nwith open(root + '2016-06-22/props.pkl', 'rb') as f:\n props = pickle.load(f, encoding='latin1')\nwith open(root + '2016-06-22/validation_props.pkl', 'rb') as f:\n v_props = pickle.load(f, encoding='latin1')\n\n# cdfs\nfig = plt.figure()\nfig.set_size_inches(11.5,8)\nax1 = fig.add_subplot(111)\nprops = props.dropna()\nprops = props.sort_values('log_mKate')\nprops['mKate_rank'] = np.linspace(0.0, 1.0, len(props))\nprops = props.sort_values('log_GFP')\nprops['GFP_rank'] = np.linspace(0.0, 1.0, len(props))\nprops = props.sort_values('log_sum_ratio')\nprops['ratio_rank'] = np.linspace(0.0, 1.0, len(props))\nalpha = 0.7\nmKate_handle, = ax1.plot(props['log_mKate'], props['mKate_rank'],\n 'o', label='expression', alpha=alpha)\nGFP_handle, = ax1.plot(props['log_GFP'], props['GFP_rank'],\n 'o', label='localization', alpha=alpha)\nratio_handle, = ax1.plot(props['log_sum_ratio'], props['ratio_rank'],\n 'o', label='localization efficiency', alpha=alpha)\nax1.set_ylabel('cumulative probability')\nleg = ax1.legend(handles=[mKate_handle, GFP_handle, ratio_handle],\n loc='best', handletextpad=0)\nax1.margins(0.02)\nfig.savefig('plots/cdfs.pdf')\n\n# with verification\nkeep_me = ['name', 'log_mKate', 'log_GFP', 'log_sum_ratio']\nv_props = v_props[~v_props['name'].isin(parent_names)]\nall_props = pd.concat([props[keep_me], v_props[keep_me]])\nall_props = all_props.sort_values('log_mKate')\nall_props['mKate_rank'] = np.linspace(0.0, 1.0, len(all_props))\nall_props = all_props.sort_values('log_GFP')\nall_props['GFP_rank'] = np.linspace(0.0, 1.0, len(all_props))\nall_props = all_props.sort_values('log_sum_ratio')\nall_props['ratio_rank'] = np.linspace(0.0, 1.0, len(all_props))\nfig = plt.figure()\nfig.set_size_inches(11.5,8)\nax1 = fig.add_subplot(111)\nalpha = 0.1\nax1.plot(props['log_mKate'], props['mKate_rank'],\n 'o', label='expression', alpha=alpha)\nax1.plot(props['log_GFP'], props['GFP_rank'],\n 'o', label='localization', alpha=alpha)\nax1.plot(props['log_sum_ratio'], props['ratio_rank'],\n 'o', label='localization efficiency', alpha=alpha)\nveri = all_props[all_props['name'].isin(v_props['name'])]\nalpha = 1.0\nax1.set_prop_cycle(None)\nmKate_handle, = ax1.plot(veri['log_mKate'], veri['mKate_rank'],\n 'o', label='expression', alpha=alpha)\nGFP_handle, = ax1.plot(veri['log_GFP'], veri['GFP_rank'],\n 'o', label='localization', alpha=alpha)\nratio_handle, = ax1.plot(veri['log_sum_ratio'], veri['ratio_rank'],\n 'o', label='localization efficiency', alpha=alpha)\nax1.set_ylabel('cumulative probability')\nleg = ax1.legend(handles=[mKate_handle, GFP_handle, ratio_handle],\n 
loc='best', handletextpad=0)\nax1.margins(0.02)\nfig.savefig('plots/verification_cdfs.pdf')\n\nfor reg, clas, file_name, name, validation, y in zip(regression_data,\n class_data, file_names,\n names, validations, ys):\n r_df = pd.read_csv(data_folder + reg, skiprows=1, comment='#')\n c_df = pd.read_csv(data_folder + clas, comment='#')\n r_v = pd.read_csv(validation_folder + validation[0], comment='#')\n c_v = pd.read_csv(validation_folder + validation[1], comment='#')\n\n # plot regression and classification LOOs side by side\n fig = plt.figure()\n fig.set_size_inches((24,9))\n ax1 = fig.add_subplot(121)\n ax1.plot(r_df['y'], r_df['mu'], 'o', ms=12, color='grey', alpha=0.5)\n for p in parent_names:\n ax1.plot(r_df[r_df['name']==p]['y'], r_df[r_df['name']==p]['mu'],\n 'o', ms=14, color=parents[p], alpha=0.8)\n ax1.set_xlabel('measured\\n' + name)\n ax1.set_ylabel('predicted ' + name)\n ax1.set_title('regression')\n xlims = ax1.get_xlim()\n if name != 'expression':\n ylims = ax1.get_ylim()\n ylims = ax1.set_ylim([xlims[0] * 0.75, ylims[1]])\n ax2 = fig.add_subplot(122)\n c_df['real'] = [props[props['name']==n][y] for n in c_df['name']]\n ax2.plot(c_df['real'], c_df['pi'], 'o', ms=12, color='grey', alpha=0.5)\n for p in parent_names:\n ax2.plot(c_df[c_df['name']==p]['real'], c_df[c_df['name']==p]['pi'],\n 'o', ms=14, color=parents[p], alpha=0.8)\n frac = 0.9\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0,\n box.width * frac, box.height])\n box = ax2.get_position()\n ax2.set_position([box.x0 - box.width * (1-frac), box.y0,\n box.width * frac, box.height])\n\n lg = plt.legend(('training set', 'CsChrimR', 'C1C2', 'CheRiff'),\n loc='center left', bbox_to_anchor=(1, 0.5),\n frameon=True, handletextpad=0, borderpad=0.03)\n lowest_parent = min(props[props['name'].isin(parent_names)][y])\n ylims = ax2.set_ylim([0, 1])\n xlims = ax2.set_xlim(xlims)\n ax2.set_title('classification')\n ax2.axvspan(xlims[0], lowest_parent, facecolor='grey', alpha=0.2)\n ax2.axvline(lowest_parent, color=sns.xkcd_rgb['gold'], alpha=0.8)\n ax2.set_xlabel('measured\\n' + name)\n ax2.set_ylabel('predicted prob above parent')\n fig.savefig('plots/' + file_name + '_LOO.pdf',\n bbox_inches='tight')\n # plot combined regression and classification side by side\n ax1.plot(r_v['y'], r_v['mu'], 'o', ms=12, color='black', alpha=0.9)\n ax2.plot(c_v['real'], c_v['pi'], 'o', ms=12, color='black', alpha=0.9)\n handles, labels = ax2.get_legend_handles_labels()\n lg = plt.legend(handles[0:4] + [handles[-1]],\n ('training set', 'CsChrimR', 'C1C2', 'CheRiff', 'verify'),\n loc = 'center left', bbox_to_anchor=(1, 0.5),\n frameon=True, handletextpad=0, borderpad=0.03)\n fig.savefig('plots/' + file_name + '_combined.pdf', bbox_inches='tight')\n", "sub_path": "2/plotting.py", "file_name": "plotting.py", "file_ext": "py", "file_size_in_byte": 7560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "seaborn.set_style", "line_number": 8, "usage_type": "call"}, {"api_name": "seaborn.set_context", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 12, "usage_type": 
"attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 42, "usage_type": "attribute"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 43, "usage_type": "attribute"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 44, "usage_type": "attribute"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 47, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 50, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 158, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}]} +{"seq_id": "94564782", "text": "from Countries import France\nfrom mimesis import Person\nfrom mimesis.enums import Gender\n\nperson = 
Person(France['gen'])\nperson.full_name(gender=Gender.MALE)\n\n\"\"\"\ncs da de de-at de-ch el en en-au en-ca \nCzech Danish German Austrian german Swiss german Greek English Australian English Canadian English\n\nen-gb es es-mx et fa fi fr hu is it \nBritish English Spanish Mexican Spanish Estonian Farsi Finnish French Hungarian Icelandic Italian \n\nja kk ko nl nl-be no pl pt pt-br \nJapanese Kazakh Korean Dutch Belgium Dutch Norwegian Polish Portuguese Brazilian Portuguese \n\nru sv tr uk zh \nRussian Swedish Turkish Ukrainian Chinese \n\n\"\"\"\n\n# Romania\n\nRomanian_surnames = {'Popa', 'Popescu', 'Ionescu', 'Pop', 'Radu', 'Dumitru', 'Gheorghe', 'Stoica', 'Stan', 'Munteanu',\n 'Constantin', 'Andrei', 'Rusu', 'Anghel', 'Matei', 'Marin', 'Mihai', 'Ciobanu', 'Serban', 'Stefan',\n 'Lazar', 'Florea', 'Dumitrescu', 'Barbu', 'Stanciu', 'Vasile', 'Ilie', 'Cristea', 'Toma',\n 'Moldovan', 'Oprea', 'Dinu', 'Tudor', 'Ionita', 'Ion', 'Ungureanu', 'Constantinescu', 'Georgescu',\n 'Balan', 'Neagu', 'Dragomir', 'Badea', 'Cojocaru', 'Sandu', 'Mocanu', 'Enache', 'Nagy', 'Coman',\n 'Craciun', 'Lupu', 'Muresan', 'Vlad', 'Dobre', 'Tanase', 'Avram', 'Radulescu', 'Iordache',\n 'Grigore', 'Lungu', 'Ivan', 'Nicolae', 'Szabo', 'Bucur', 'Manea', 'Ene', 'Marinescu', 'Alexandru',\n 'Petre', 'Albu', 'Voicu', 'Preda', 'Iancu', 'Dragan', 'Olteanu', 'Stoian', 'David', 'Petrescu',\n 'Roman', 'Iacob', 'Filip', 'Diaconu', 'Costea', 'Baciu', 'Marcu', 'Rosu', 'Nistor', 'Kovacs',\n 'Pavel', 'Cretu', 'Stanescu', 'Anton', 'Simion', 'Luca', 'Nita', 'Calin', 'Rotaru', 'Nedelcu',\n 'Bogdan', 'Suciu', 'Crisan'}\n", "sub_path": "1930_name_gen.py", "file_name": "1930_name_gen.py", "file_ext": "py", "file_size_in_byte": 2360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mimesis.Person", "line_number": 5, "usage_type": "call"}, {"api_name": "Countries.France", "line_number": 5, "usage_type": "name"}, {"api_name": "mimesis.enums.Gender.MALE", "line_number": 6, "usage_type": "attribute"}, {"api_name": "mimesis.enums.Gender", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "482545842", "text": "\"\"\"utility functions to build CLI.\"\"\"\n\nfrom __future__ import print_function\nimport six\nimport sys\nimport ctypes\n\nSYNUTIL_INSTANCE = None\n\n\ndef _get_synutil():\n global SYNUTIL_INSTANCE\n if SYNUTIL_INSTANCE is None:\n i = ctypes.cdll.LoadLibrary(\"libsynutil.so\")\n i.synutil_echo_ok.restype = None\n i.synutil_echo_ok.argtypes = [ctypes.c_char_p]\n i.synutil_echo_nok.restype = None\n i.synutil_echo_nok.argtypes = [ctypes.c_char_p]\n i.synutil_echo_warning.restype = None\n i.synutil_echo_warning.argtypes = [ctypes.c_char_p]\n i.synutil_echo_bold.restype = None\n i.synutil_echo_bold.argtypes = [ctypes.c_char_p]\n i.synutil_echo_running.restype = None\n i.synutil_echo_running.argtypes = []\n i.synutil_echo_clean.restype = None\n i.synutil_echo_clean.argtypes = []\n SYNUTIL_INSTANCE = i\n return SYNUTIL_INSTANCE\n\n\ndef echo_ok(message=\"\"):\n \"\"\"Write [OK] with colors if supported a little optional message.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n _get_synutil().synutil_echo_ok(message.encode('utf8'))\n\n\ndef echo_nok(message=\"\"):\n \"\"\"Write [ERROR] with colors if supported a little optional message.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n _get_synutil().synutil_echo_nok(message.encode('utf8'))\n\n\ndef echo_warning(message=\"\"):\n \"\"\"Write [WARNING] with colors if supported a 
little optional message.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n _get_synutil().synutil_echo_warning(message.encode('utf8'))\n\n\ndef echo_bold(message):\n \"\"\"Write a message in bold (if supported).\n\n Args:\n message (string): message to write in bold.\n\n \"\"\"\n _get_synutil().synutil_echo_bold(message.encode('utf8'))\n\n\ndef echo_running(message=None):\n \"\"\"Write [RUNNING] with colors if supported.\n\n You can pass an optional message which will be rendered before [RUNNING]\n on the same line.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n if message is None:\n _get_synutil().synutil_echo_running()\n else:\n if six.PY2:\n print(message, end=\"\")\n sys.stdout.flush()\n else:\n print(message, end=\"\", flush=True)\n _get_synutil().synutil_echo_running()\n\n\ndef echo_clean():\n \"\"\"Clean waiting status.\"\"\"\n _get_synutil().synutil_echo_clean()\n", "sub_path": "layers/layer1_python3/0100_mfutil/mfutil/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 2435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ctypes.cdll.LoadLibrary", "line_number": 14, "usage_type": "call"}, {"api_name": "ctypes.cdll", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 16, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 20, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 22, "usage_type": "attribute"}, {"api_name": "six.PY2", "line_number": 84, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 86, "usage_type": "attribute"}]} +{"seq_id": "587857399", "text": "#!/usr/bin/python\n\nimport sys\nimport json\n\n#set the path to the input file\ntweets_data_path = 'C:/Users/Tanvi/Desktop/project2/stream/tweets_MH.txt'\n#tweets_data_path = 'C:/Users/Tanvi/Desktop/project2/stream/pollution/tweets_P.txt'\n\n#initialize an array and open the output file for reading\ntweets_file = open(tweets_data_path, \"r\")\n\n\n#process each line in input file\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n num_urls = len(tweet['entities']['urls'])\n #print(\"num_urls: \", num_urls)\n if num_urls > 0:\n for i in range(num_urls):\n url = tweet['entities']['urls'][i][\"expanded_url\"]\n if url:\n print (\"{}\\t{}\".format(url.lower(), 1))\n else:\n url = tweet['entities']['urls'][i][\"url\"]\n if url:\n print (\"{}\\t{}\".format(url.lower(), 1)) \n \n except:\n continue\n \n\n", "sub_path": "Code/mapper_topUrls.py", "file_name": "mapper_topUrls.py", "file_ext": "py", "file_size_in_byte": 975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.loads", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "461753459", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PCNet(nn.Module):\n\n def __init__(self, K: int, M: int, R_epochs: int = 150, R_lr: float = 0.1, lmda: float = 5e-3):\n '''\n Create a sparse coding network. 
Neural responses are fitted through ISTA algorithm.\n\n Args:\n K: number of neurons\n M: size of receptive field (width / height)\n R_epochs: number of epochs to run for ISTA\n R_lr: learning rate for ISTA\n lmda: regularization strength for ISTA\n '''\n super(PCNet, self).__init__()\n self.K = K\n self.M = M\n self.R_epochs = R_epochs\n self.R_lr = R_lr\n self.lmda = lmda\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # model weigths\n self.U = torch.randn(self.K, self.M ** 2, requires_grad=True, device=self.device)\n with torch.no_grad():\n self.U = F.normalize(self.U, dim=1)\n self.U.requires_grad_(True)\n # responses\n self.R = None\n\n def _ista(self, img_batch):\n # create R\n batch_size = img_batch.shape[0]\n self.R = torch.zeros((batch_size, self.K), requires_grad=True, device=self.device)\n # trian\n for _ in range(self.R_epochs):\n # pred\n pred = self.R @ self.U\n # loss\n loss = ((img_batch - pred) ** 2).sum()\n loss.backward()\n # update R in place\n self.R.data.sub_(self.R_lr * self.R.grad.data)\n # zero grad\n self.zero_grad()\n # soft thresholding\n with torch.no_grad():\n self.R = PCNet._soft_thresholding(self.R, self.lmda)\n self.R.requires_grad_(True)\n\n @staticmethod\n def _soft_thresholding(x, alpha):\n return F.relu(x - alpha) - F.relu(-x - alpha)\n\n def zero_grad(self):\n self.U.grad.data.zero_()\n self.R.grad.data.zero_()\n\n def forward(self, img_batch):\n # first fit\n self._ista(img_batch)\n # now predict again\n pred = self.R @ self.U\n return pred\n", "sub_path": "src/model/PCNet.py", "file_name": "PCNet.py", "file_ext": "py", "file_size_in_byte": 2149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "575177164", "text": "import torch\n\nchars_lower = [ chr(code) for code in range(ord('a'),ord('z')+1)]\nchars_upper = [ chr(code) for code in range(ord('A'),ord('Z')+1)]\nchars_special = [ code for code in \" -_.\" ]\ncode_special = [ \"?\", \"\", \"\", \"PAD\" ]\n\nSYMBOLS = chars_lower + chars_upper + chars_special + code_special\n\nMAX_LEN=27\nFIRST_LAYER_SIZE=MAX_LEN * len(SYMBOLS)\n\nMAX_OUT_LEN=22\nLAST_LAYER_SIZE=MAX_OUT_LEN * len(SYMBOLS)\n\nSEP_TOKEN = '[SEP]'\nCLS_TOKEN = '[CLS]'\nTRAIN_FILE_PATH = './data/labeled-2.csv'\nMODEL_FILE_PATH = '/share/model/predicate-model.pth'\nMODEL_OVERWRITE = False\nBATCH_SIZE = 2\nNUM_EPOCHS = 100\nGRADIENT_ACCUMULATION_STEPS = 8\nMAX_CLASS_SIZE = 20 # float(\"inf\") for all\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else 
\"cpu\")\nprint(DEVICE)\n", "sub_path": "abbrev-trainer/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "360354813", "text": "\"\"\"Integer field class & utilities.\"\"\"\nfrom gettext import gettext as _\nfrom typing import Any\nfrom typing import Optional\nfrom typing import cast\n\nfrom pofy.core.constants import UNDEFINED\nfrom pofy.core.errors import ErrorCode\nfrom pofy.core.interfaces import ILoadingContext\nfrom pofy.core.validation import ValidateCallback\nfrom pofy.fields.base_field import ScalarField\n\n\nclass IntField(ScalarField):\n \"\"\"Integer YAML object field.\"\"\"\n\n def __init__(\n self,\n base: int = 0,\n minimum: Optional[int] = None,\n maximum: Optional[int] = None,\n required: bool = False,\n validate: Optional[ValidateCallback] = None,\n ):\n \"\"\"Initialize int field.\n\n Args:\n base: Base in which this field is encoded. By default, base is 0,\n meaning that python will distinguish automatically decimal,\n octal, and hexadecimal notations from the string.\n minimum: Minimum value for the field. If the value is out of bound,\n a VALIDATION_ERROR will be raised.\n maximum: Maximum value for the field. If the value is out of bound,\n a VALIDATION_ERROR will be raised.\n required: See BaseField constructor.\n validate: See BaseField constructor.\n\n \"\"\"\n super().__init__(required=required, validate=validate)\n self._base = base\n self._minimum = minimum\n self._maximum = maximum\n\n def _convert(self, context: ILoadingContext) -> Any:\n node = context.current_node()\n value = node.value\n result: Optional[int] = None\n\n try:\n result = int(value, self._base)\n except ValueError:\n context.error(\n ErrorCode.VALUE_ERROR,\n _('Can\\'t convert \"{}\" to an integer'), value\n )\n return UNDEFINED\n\n return cast(Optional[int], ScalarField._check_in_bounds(\n context,\n result,\n self._minimum,\n self._maximum\n ))\n", "sub_path": "pofy/fields/int_field.py", "file_name": "int_field.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pofy.fields.base_field.ScalarField", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "pofy.core.validation.ValidateCallback", "line_number": 23, "usage_type": "name"}, {"api_name": "pofy.core.interfaces.ILoadingContext", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "pofy.core.errors.ErrorCode.VALUE_ERROR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pofy.core.errors.ErrorCode", "line_number": 53, "usage_type": "name"}, {"api_name": "gettext.gettext", "line_number": 54, "usage_type": "call"}, {"api_name": "pofy.core.constants.UNDEFINED", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 58, 
"usage_type": "name"}, {"api_name": "pofy.fields.base_field.ScalarField._check_in_bounds", "line_number": 58, "usage_type": "call"}, {"api_name": "pofy.fields.base_field.ScalarField", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "93054590", "text": "# coding=utf-8\nfrom django.core.mail import EmailMultiAlternatives\nimport datetime\n\"\"\"\nNot finish yet\n\"\"\"\n\n\nclass Mail(object):\n def __init__(self):\n pass\n\n def send(self, operations, email):\n \"\"\"\n\n :param operations: [{'type': op_type, 'state': state, 'l_url':l_url, 'l_name': l_name,\n 'cate_eng': cate_eng, 'cate_chn': cate_chn}, ...]\n op_type: add/update\n state: 成功/失败\n :param email:\n :return:\n \"\"\"\n subject = '%s Update Report' % datetime.datetime.now().strftime('%Y-%m-%d %H:%M')\n text_content = 'content here'\n\n # content\n movie_list = []\n tv_list = []\n anime_list = []\n show_list = []\n for op in operations:\n if op.get('cate_eng') == 'movie':\n movie_list.append(op)\n elif op.get('cate_eng') == 'tv':\n tv_list.append(op)\n elif op.get('cate_eng') == 'anime':\n anime_list.append(op)\n elif op.get('cate_eng') == 'show':\n show_list.append(op)\n content = ''\n for item in (movie_list, tv_list, anime_list, show_list):\n for op in item:\n op.get('')\n content = ''.encode(\n 'utf8')\n html_content = open(\n BASE_DIR + '/templates/userinfo/mail/general_mail.html').read() \\\n .replace('subject_default', subject).replace('content_default',\n content).replace(\n 'link_default', '')\n\n from_email = '比格电影 '\n # from_email = 'bigedianying@gmail.com'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "sub_path": "spider/mail.py", "file_name": "mail.py", "file_ext": "py", "file_size_in_byte": 1857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "141808738", "text": "from typing import List\nimport numpy as np\n\n\nclass BaskinModel(object):\n '''\n Basking model implementation\n Asumes 0.4 factor for endurance limit\n Suitable for carbon steels\n Not applicable for alloys\n '''\n\n # Fatigue chart resolution\n NO_OF_CHART_POINTS = 200\n\n def __init__(\n self,\n faitgue_stress: List[float],\n ult_strength: float,\n modification_factor: float,\n ):\n self.fatigue_stress = faitgue_stress\n self.ult_strength = ult_strength\n self.modification_factor = modification_factor\n\n def get_baskin_params(self, derated=True):\n '''\n Calculates Baskin parameters for the fatigue curve\n '''\n # If data should be raw, not derated, applied modification factor = 1\n if derated == False:\n self.modification_factor = 1\n self.endurance_limit = 0.4 * self.ult_strength * self.modification_factor\n\n def s_1000_factor():\n '''\n Calculates starting point for fatigue curve\n Based on Shigley data, depends on material ultimate strength\n '''\n if self.ult_strength < 130:\n return (\n -1.4218548015713e-07 * self.ult_strength ** 3\n + 0.0000563482426806003 * self.ult_strength ** 2\n - 0.00832826468826188 * self.ult_strength\n + 1.25431693640081\n )\n return (\n -3.30944038409e-09 * self.ult_strength ** 3\n + 3.31244407581022e-06 * 
self.ult_strength ** 2\n - 0.00134990048235594 * self.ult_strength\n + 0.936702621709383\n )\n\n self.B_factor = (\n -1\n / 3\n * np.log10(\n s_1000_factor()\n * self.modification_factor\n * self.ult_strength\n / self.endurance_limit\n )\n )\n self.C_factor = np.log10(\n (self.modification_factor * s_1000_factor() * self.ult_strength) ** 2\n / self.endurance_limit\n )\n\n def get_allowable_cycles(self):\n '''\n Calculates allowable cycles based on modification factor and fatigue stress\n @return: List[float]\n '''\n self.get_baskin_params()\n allowable_cycles = []\n for stress in self.fatigue_stress:\n if stress <= self.endurance_limit:\n allowable_cycles.append(10 ** 12)\n else:\n allowable_cycles.append(\n 10 ** (-self.C_factor / self.B_factor)\n * stress ** (1 / self.B_factor)\n )\n return allowable_cycles\n\n def get_damage(self, required_cycles: List[float]):\n '''\n Calculates fatigue damage based on raquired and allwable cycle values\n @return: List[float]\n '''\n damage = []\n allowable_cycles = self.get_allowable_cycles()\n for req_cycles, allow_cycle in zip(required_cycles, allowable_cycles):\n damage.append(round(req_cycles / allow_cycle, 3))\n return damage\n\n def get_chart_data(self, derated):\n '''\n Evaluate fatigue chart using Baskin model\n User can evaluated raw (derated=False) or derated curve\n @return: tuple[List[float]]\n '''\n self.get_baskin_params(derated)\n print(f\"Modification factor {self.modification_factor}\")\n cycle_range = np.linspace(\n 1000, 1_000_000, num=self.NO_OF_CHART_POINTS, endpoint=True\n )\n stress = [10 ** self.C_factor * item ** self.B_factor for item in cycle_range]\n return cycle_range, stress\n", "sub_path": "api/src/fatigue/fatiguelife.py", "file_name": "fatiguelife.py", "file_ext": "py", "file_size_in_byte": 3690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 64, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "617959963", "text": "from django.contrib.auth import views as auth_views\nfrom django.urls import include, path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.PostList.as_view(), name='index'),\n path('post/create/', views.PostCreate.as_view(), name='create'),\n path('post//details/', views.PostDetails.as_view(), name='details'),\n path('post//edit/', views.PostEdit.as_view(), name='edit'),\n path('post/drafts/', views.PostDrafts.as_view(), name='drafts'),\n path('post//publish/', views.PostPublish.as_view(), name='publish'),\n path('post//remove/', views.PostRemove.as_view(), name='remove'),\n path('accounts/', include('django.contrib.auth.urls')),\n # Function View\n # path('details//', views.post_detail, name='details'),\n]\n", "sub_path": "app/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "46585154", "text": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nimport ARM\n\nclass TargetListBox(wx.ListBox):\n def __init__(self,parent,winId,boxSize,dp,rl):\n wx.ListBox.__init__(self,parent,winId,size=boxSize)\n self.dbProxy = dp\n self.theDimMenu = wx.Menu()\n self.theDimMenu.Append(armid.DIMLIST_MENUADD_ID,'Add')\n self.theDimMenu.Append(armid.DIMLIST_MENUDELETE_ID,'Delete')\n self.theRiskList = rl\n self.theSelectedValue = ''\n self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown)\n wx.EVT_MENU(self.theDimMenu,armid.DIMLIST_MENUADD_ID,self.onAddDimension)\n wx.EVT_MENU(self.theDimMenu,armid.DIMLIST_MENUDELETE_ID,self.onDeleteDimension)\n\n def OnRightDown(self,evt):\n self.PopupMenu(self.theDimMenu)\n\n def onAddDimension(self,evt):\n targetList = self.dbProxy.targetNames(self.theRiskList.GetItems())\n from DimensionNameDialog import DimensionNameDialog\n dlg = DimensionNameDialog(self,'Target',targetList,'Add')\n if (dlg.ShowModal() == armid.DIMNAME_BUTTONACTION_ID):\n additionalDimension = dlg.dimensionName()\n self.Append(additionalDimension)\n self.theSelectedValue = additionalDimension\n\n def onDeleteDimension(self,evt):\n idx = self.GetSelection()\n if (idx == -1):\n errorText = 'No ' + self.theDimensionTable + ' selected'\n errorLabel = 'Delete ' + self.theDimensionTable\n dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n else:\n self.theSelectedValue = self.GetSelection()\n self.Delete(self.theSelectedValue)\n", "sub_path": "cairis/cairis/TargetListBox.py", "file_name": "TargetListBox.py", "file_ext": "py", "file_size_in_byte": 2311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "wx.ListBox", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wx.ListBox.__init__", "line_number": 25, "usage_type": "call"}, {"api_name": "wx.ListBox", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 27, "usage_type": "call"}, {"api_name": "armid.DIMLIST_MENUADD_ID", "line_number": 28, "usage_type": "attribute"}, {"api_name": "armid.DIMLIST_MENUDELETE_ID", "line_number": 29, "usage_type": "attribute"}, {"api_name": "wx.EVT_RIGHT_DOWN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 33, "usage_type": "call"}, {"api_name": "armid.DIMLIST_MENUADD_ID", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 34, "usage_type": "call"}, {"api_name": "armid.DIMLIST_MENUDELETE_ID", "line_number": 34, "usage_type": "attribute"}, {"api_name": "DimensionNameDialog.DimensionNameDialog", "line_number": 42, "usage_type": "call"}, {"api_name": "armid.DIMNAME_BUTTONACTION_ID", "line_number": 43, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 53, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 53, "usage_type": "attribute"}]} +{"seq_id": "591283925", "text": "# Python Moduals\nimport Queue \nimport platform\nimport time\nimport pandas as pd\n\n# Person python files\nimport data\nimport strategy\nimport portfolio\nimport execution\nimport visualize\n\n\n\nif platform.system() == \"Linux\":\n dirName = \"/home/nkippers/git/quantStartTrail1\"\nelse:\n dirName = \"/Users/noelkippers/git/quantStartTrail1\"\n\nevents = Queue.Queue()\nsymbol = [\"AAPL\"]\nstart_date = \"2014-01-10\"\n\nevents = Queue.Queue()\nbars = data.HistoricCSVDataHandler(events, dirName, symbol)\nstrategy 
= strategy.BuyAndHoldStrategy(bars, events)\nport = portfolio.NaivePortfolio(bars, events, start_date, initial_capital=100000.0)\nbroker = execution.SimulatedExecutionHandler(events)\nplotter = visualize.DataPlots(port)\n\n# Declare the components with respective parameters\n# bars = DataHandler(..)\n# strategy = Strategy(..)\n# port = Portfolio(..)\n# broker = ExecutionHandler(..)\n\nwhile True:\n # Update the bars (specific backtest code, as opposed to live trading)\n if bars.continue_backtest == True:\n bars.update_bars()\n else:\n break\n \n # Handle the events\n while True:\n try:\n event = events.get(False)\n except Queue.Empty:\n break\n else:\n if event is not None:\n# print event.type\n if event.type == 'MARKET':\n strategy.calculate_signals(event)\n port.update_timeindex(event)\n\n elif event.type == 'SIGNAL':\n port.update_signal(event)\n\n elif event.type == 'ORDER':\n broker.execute_order(event)\n\n elif event.type == 'FILL':\n port.update_fill(event)\n \n# time.sleep(1)\n # 10-Minute heartbeat\n# time.sleep(10*60)\nport.create_equity_curve_dataframe()\n# print port.output_summary_stats()\n# port.plot_summary()\n# plotter.plot_OHLC()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "platform.system", "line_number": 16, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 21, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 25, "usage_type": "call"}, {"api_name": "data.HistoricCSVDataHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "strategy.BuyAndHoldStrategy", "line_number": 27, "usage_type": "call"}, {"api_name": "portfolio.NaivePortfolio", "line_number": 28, "usage_type": "call"}, {"api_name": "execution.SimulatedExecutionHandler", "line_number": 29, "usage_type": "call"}, {"api_name": "visualize.DataPlots", "line_number": 30, "usage_type": "call"}, {"api_name": "Queue.Empty", "line_number": 49, "usage_type": "attribute"}, {"api_name": "strategy.calculate_signals", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "637916524", "text": "from pathlib import Path\nfrom os.path import getsize\nfrom shutil import copy\n\n\ndef print_d(mypath: Path) -> [Path]:\n\t'In the first menu between D and R, function for D'\n\ttemp = []\n\tfor x in mypath.iterdir():\n\t\tif x.is_file():\n\t\t\ttemp.append(x)\n\tb = sorted(temp)\n\treturn b\n\ndef print_r(mypath: Path) -> [Path]:\n\t'In the first menu between D and R, function for R'\n\ta.extend(print_d(mypath))\n\tfor x in sorted(mypath.iterdir()):\n\t\tif x.is_dir():\n\t\t\tprint_r(x)\n\treturn a\n\ndef print_a(b: [Path]) -> [Path]:\n\t'It is printing function exist mainly because many menu requires printing result'\n\tfor x in b:\n\t\tprint(x)\n\treturn b\n\ndef print_n(mystring: str) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for N'\n\tmylist = []\n\tfor x in a:\n\t\tif x.name == mystring:\n\t\t\tmylist.append(x)\n\treturn mylist\n\ndef print_e(mystring: str) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for E'\n\tmylist = []\n\tif mystring[0] == '.':\n\t\tmystring2 = mystring[1:]\n\telse:\n\t\tmystring2 = mystring\n\n\tfor x in a:\n\t\tif x.suffix[1:] == mystring2:\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef textcheck(mystring: str, filepath: Path) -> bool:\n\t'In the second menu among N, E, T, <, >, it is used for T.'\n\t'I left the function of checking text 
separate from making list when push T'\n\tthe_file = open(filepath, 'r')\n\twhile True:\n\t\tline = the_file.readline()\n\t\tif line.endswith('\\n'):\n\t\t\tline = line[:-1]\n\t\t\tif mystring in line:\n\t\t\t\tthe_file.close()\n\t\t\t\treturn True\n\t\telif line == '':\n\t\t\tthe_file.close()\n\t\t\treturn False\n\t\telse:\n\t\t\tthe_file.close()\n\t\t\treturn False\n\ndef print_t(mystring: str) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for T.'\n\tmylist = []\n\tfor x in a:\n\t\tif textcheck(mystring, x):\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef print_gt(myint: int) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for >.'\n\tmylist = []\n\tfor x in a:\n\t\tif getsize(x) > myint:\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef print_lt(myint: int) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for <.'\n\tmylist = []\n\tfor x in a:\n\t\tif getsize(x) < myint:\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef f_check(mylist: [Path]) -> None:\n\t'In the third menu among F D T, it is for F.'\n\tfor x in mylist:\n\t\ttry:\n\t\t\tthe_file = open(x, 'r')\n\t\t\tline = the_file.readline()\n\t\t\tline = line[:-1]\n\t\t\tprint(line)\n\t\texcept:\n\t\t\tprint('NOT TEXT')\n\tthe_file.close()\n\ndef d_check(mylist: [Path]) -> None:\n\t'In the third menu among F D T, it is for D.'\n\tfor x in mylist:\n\t\ty = str(x) + \".dup\"\n\t\tcopy(x, y)\n\ndef t_check(mylist: [Path]) -> None:\n\t'In the third menu among F D T, it is for T.'\n\tfor x in mylist:\n\t\tx.touch()\n\ndef main_menu() -> list:\n\t'It is the first menu'\n\tmyloop = True\n\twhile myloop:\n\t\tmyinput = input('')\n\t\tmypath = Path(myinput[2:])\n\t\tif ((myinput.startswith('D') or myinput.startswith('R')) \n\t\tand(len(myinput)>2) and (myinput[1] == ' ') and (mypath.exists())):\n\t\t\tmyloop = False\n\t\t\tif myinput[0] == 'D':\n\t\t\t\td = print_d(mypath)\n\t\t\t\tprint_a(d)\n\t\t\t\treturn d\n\t\t\telif myinput[0] == 'R':\n\t\t\t\tr = print_r(mypath)\n\t\t\t\tprint_a(r)\n\t\t\t\treturn r\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\t\ndef second_menu(b: [Path]) -> [Path]:\n\t'It is the second menu'\n\tmyloop = True\n\twhile myloop:\n\t\tmyinput = input('')\n\t\tmystring = myinput[2:]\n\t\tif myinput == 'A':\n\t\t\tmyloop = False\n\t\t\tprint_a(b)\n\t\telif ((myinput.startswith('N') or myinput.startswith('E') or \n\t\tmyinput.startswith('T') or myinput.startswith('<') or \n\t\tmyinput.startswith('>')) and (len(myinput)>2) and (myinput[1] == ' ')):\n\t\t\tmyloop = False\n\t\t\tif myinput[0] == 'N':\n\t\t\t\tn = print_n(mystring)\n\t\t\t\tprint_a(n)\n\t\t\t\treturn n\n\t\t\telif myinput[0] == 'E':\n\t\t\t\te = print_e(mystring)\n\t\t\t\tprint_a(e)\n\t\t\t\treturn e\n\t\t\telif myinput[0] == 'T':\n\t\t\t\tt = print_t(mystring)\n\t\t\t\tprint_a(t)\n\t\t\t\treturn t\n\t\t\telif myinput[0] == '<':\n\t\t\t\tlt = print_lt(int(mystring))\n\t\t\t\tprint_a(lt)\n\t\t\t\treturn lt\n\t\t\telif myinput[0] == '>':\n\t\t\t\tgt = print_gt(int(mystring))\n\t\t\t\tprint_a(gt)\n\t\t\t\treturn gt\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\ndef third_menu(b: Path) -> None:\n\t'It is the third menu'\n\tmyloop = True\n\twhile myloop:\n\t\tmyinput = input('')\n\t\tif ((myinput == 'F') or (myinput == 'D') or (myinput == 'T')):\n\t\t\tmyloop = False\n\t\t\tif myinput == 'F':\n\t\t\t\tf_check(b)\n\t\t\telif myinput == 'D':\n\t\t\t\td_check(b)\n\t\t\telif myinput == 'T':\n\t\t\t\tt_check(b)\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\nif __name__ == '__main__':\n\ta = []\n\tmyfirstmenu = 
main_menu()\n\tmysecondmenu = second_menu(myfirstmenu)\n\n\tthird_menu(mysecondmenu)\n\n", "sub_path": "project1/project1_09.py", "file_name": "project1_09.py", "file_ext": "py", "file_size_in_byte": 4419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pathlib.Path", "line_number": 6, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 82, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 91, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 96, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 108, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 112, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 124, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 139, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 175, "usage_type": "name"}]} +{"seq_id": "351357956", "text": "#-*- coding: utf-8 -*-\nimport pymysql\nimport urllib.request\nimport json\nimport time\nimport sys\nimport re\nfrom bs4 import BeautifulSoup\nfrom enum import Enum\nimport hashlib\nfrom datetime import timedelta, timezone, datetime\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom queue import Queue\nfrom collections import OrderedDict\nimport schedule # pip install schedule, https://github.com/dbader/schedule\nfrom time import gmtime, strftime\nimport pytz\n\nfrom dbConfig import *\n\n\ncoin_name_list = ['BTC', 'ETH', 'DASH', 'LTC', 'ETC', 'XRP', 'BCH', 'XMR', 'ZEC'] # 9개 (QTUM 제외)\n# coin_name_list = ['ETH', 'DASH', 'LTC', 'ETC', 'XRP', 'BCH', 'XMR', 'ZEC'] # 8개\n\nlength_process = 100\n\ninsert_trade_sql = \"INSERT INTO `TRADE_{0:s}` (`date`, `exchange_rate`, `price`, `price2`, `amount`, `total`, `type`, `exchange`, `count`, `trade_id`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\nselect_trade_sql = \"SELECT count(*) FROM `TRADE_{0:s}` WHERE `trade_id`=%s\"\nselect_in_sql = \"SELECT `trade_id` FROM `TRADE_{0:s}` WHERE %s\"\nselect_trade_all_sql = \" UNION \".join(list(map(lambda x: \"(SELECT '\" + x + \"', `price2`, `date` FROM trade.TRADE_\" + x + \" order by id desc limit 1)\", coin_name_list)))\nselect_in_sql = \"SELECT `trade_id` FROM `TRADE_{0:s}` WHERE %s\"\nselect_f_exchange_sql = \"SELECT * FROM `F_EXCHANGE` WHERE `timestamp`=%s and `quote`=%s;\"\ninsert_f_exchange_sql = \"INSERT INTO `F_EXCHANGE` (`timestamp`, `quote`) VALUES (%s, %s);\"\n\nclass Terms_Bithumb(Enum):\n sell = \"bid\"\n buy = \"ask\"\n\nclass Terms_Poloniex(Enum):\n sell = \"sell\"\n buy = \"buy\"\n\ncumulative_bithumb_call_count = 0\ncumulative_poloniex_call_count = 0\n\nprevious_t_bithumb = {}\nprevious_t_poloniex = {}\nexist_bithumb_table_records = {}\n\nfor coin_name in coin_name_list:\n previous_t_bithumb[coin_name] = 
[{\"transaction_date\":\"1511031406\",\"type\":\"ask\",\"units_traded\":\"0.042\",\"price\":\"9047000\",\"total\":\"379974\"}]\n previous_t_poloniex[coin_name] = [\n {\"globalTradeID\": 266012461, \"tradeID\": 12459517, \"date\": \"2017-11-22 01:34:12\", \"type\": \"sell\",\n \"rate\": \"8101.67292206\", \"amount\": \"0.05619519\", \"total\": \"455.27504917\"}]\n exist_bithumb_table_records[coin_name] = OrderedDict()\n\nrecent_foreign_exchange_rate = (\"1092.90\", \"2017-11-22 11:16:00\")\n\nclass ConnectionPool():\n \"\"\"\n Usage:\n conn_pool = ConnectionPool(max_pool_size = 5)\n conn = conn_pool.get_connection()\n conn_pool.return_connection(db)\n conn_pool.close()\n \"\"\"\n def __init__(self, max_pool_size=5):\n self.max_pool_size = max_pool_size\n self.initialize_pool()\n\n def initialize_pool(self):\n self.pool = Queue(maxsize=self.max_pool_size)\n for _ in range(0, self.max_pool_size):\n self.pool.put_nowait(\n pymysql.connect(host=dbURL,\n port=dbPort,\n user=dbUser,\n passwd=dbPass,\n db=dbName,\n charset='utf8mb4',\n use_unicode=True\n )\n )\n\n def get_connection(self):\n # returns a conn instance when one is available else waits until one is\n conn = self.pool.get(True)\n\n # checks if conn is still connected because conn instance automatically closes when not in used\n if not self.ping(conn):\n conn.connect()\n\n return conn\n\n def return_connection(self, conn):\n return self.pool.put_nowait(conn)\n\n def close(self):\n while not self.is_empty():\n self.pool.get().close()\n\n def ping(self, conn):\n data = conn.query('SELECT 1', [])\n return data\n\n def get_initialized_connection_pool(self):\n return self.pool\n\n def is_empty(self):\n return self.pool.empty()\n\nconn_pool = ConnectionPool(max_pool_size = 5)\n\ndef by_trade_timestamp(trade):\n return trade['transaction_date']\n\ndef by_trade_date(trade):\n return trade['date']\n\ndef utc_to_asia_seoul(utc_dt):\n local_tz = pytz.timezone('Asia/Seoul')\n local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\n return local_tz.normalize(local_dt)\n\ndef get_trade_bithumb(coin_name, conn):\n global previous_t_bithumb\n global exist_bithumb_table_records\n\n try:\n url_s = 'https://api.bithumb.com/public/recent_transactions/' + coin_name + '?count=' + str(length_process)\n raw_read = urllib.request.urlopen(url_s).read()\n t_bithumb = json.loads(raw_read)['data']\n except BaseException as e:\n print(\"Bithumb API Exception!!! 
- \", e)\n return 0\n\n for trade in t_bithumb:\n timestamp = int(time.mktime(datetime.strptime(trade['transaction_date'], \"%Y-%m-%d %H:%M:%S\").timetuple()))\n trade['transaction_date'] = timestamp\n\n t_bithumb.sort(key=by_trade_timestamp, reverse=True)\n\n last_t_trade = t_bithumb[-1]\n\n found_same = False\n for p_idx, p_trade in enumerate(previous_t_bithumb[coin_name]):\n if p_trade['transaction_date'] == last_t_trade['transaction_date'] and \\\n p_trade['units_traded'] == last_t_trade['units_traded'] and \\\n p_trade['price'] == last_t_trade['price'] and \\\n p_trade['type'] == last_t_trade['type'] and \\\n p_trade['total'] == last_t_trade['total']:\n found_same = True\n break\n if found_same:\n new_trade_list = t_bithumb[:len(t_bithumb) - p_idx - 1]\n else:\n new_trade_list = t_bithumb\n\n previous_t_bithumb[coin_name] = sorted(t_bithumb, key=by_trade_timestamp, reverse=True)\n\n if len(new_trade_list) > 0:\n new_trade_list.reverse()\n trade_id_count = {}\n for trade in new_trade_list:\n date = trade['transaction_date']\n amount = trade['units_traded']\n price = trade['price']\n type = Terms_Bithumb(trade['type']).name\n total = trade['total']\n trade_id = hashlib.sha224((str(date) + amount + price + type + total).encode('utf-8')).hexdigest()\n\n if trade_id in exist_bithumb_table_records[coin_name].keys():\n trade_id_count[trade_id] = exist_bithumb_table_records[coin_name][trade_id] + 1\n else:\n if trade_id in trade_id_count.keys():\n trade_id_count[trade_id] += 1\n else:\n trade_id_count[trade_id] = 1\n try:\n date = datetime.fromtimestamp(trade['transaction_date']).strftime('%Y-%m-%d %H:%M:%S')\n exist_bithumb_table_records[coin_name][trade_id] = trade_id_count[trade_id]\n cursor = conn.cursor()\n cursor.execute(\n insert_trade_sql.format(coin_name),\n (date, str(1.0), price, price, amount, total, type, 'bithumb', trade_id_count[trade_id], trade_id)\n )\n except Exception as e:\n print(\"Bithumb Insert Exception\", e)\n pass\n conn.commit()\n\n over_num_queue = len(exist_bithumb_table_records[coin_name]) - length_process * 2\n if over_num_queue > 0:\n for _ in range(over_num_queue):\n exist_bithumb_table_records[coin_name].popitem(last=False)\n\n return len(new_trade_list)\n\ndef get_trade_poloniex(coin_name, conn):\n global previous_t_poloniex\n\n try:\n url_s = 'https://poloniex.com/public?command=returnTradeHistory¤cyPair=USDT_' + coin_name + '&limit=' + str(length_process)\n raw_read = urllib.request.urlopen(url_s).read()\n t_poloniex = json.loads(raw_read)\n except BaseException as e:\n print(\"Poloniex API Exception!!! 
- \", e)\n return 0\n\n\n for trade in t_poloniex:\n timestamp = int(time.mktime(datetime.strptime(trade['date'], \"%Y-%m-%d %H:%M:%S\").timetuple()))\n trade['date'] = timestamp\n\n t_poloniex.sort(key=by_trade_date, reverse=True)\n\n last_t_trade = t_poloniex[-1]\n\n found_same = False\n for p_idx, p_trade in enumerate(previous_t_poloniex[coin_name]):\n if p_trade['globalTradeID'] == last_t_trade['globalTradeID']:\n found_same = True\n break\n if found_same:\n new_trade_list = t_poloniex[:len(t_poloniex) - p_idx - 1]\n else:\n new_trade_list = t_poloniex\n\n previous_t_poloniex[coin_name] = sorted(t_poloniex, key=by_trade_date, reverse=True)\n\n if len(new_trade_list) > 0:\n new_trade_list.reverse()\n trade_id_count = {}\n for trade in new_trade_list:\n date = datetime.fromtimestamp(trade['date']) + timedelta(hours=9)\n date = datetime.fromtimestamp(date.timestamp()).strftime('%Y-%m-%d %H:%M:%S')\n exchange_rate = float(recent_foreign_exchange_rate[0])\n price = trade['rate']\n price2 = float(trade['rate']) * exchange_rate\n amount = trade['amount']\n type = Terms_Poloniex(trade['type']).name\n total = float(trade['amount']) * price2\n trade_id = str(trade['globalTradeID'])\n\n try:\n\n cursor = conn.cursor()\n cursor.execute(\n insert_trade_sql.format(coin_name),\n (date, str(exchange_rate), price, str(price2), amount, str(total), type, 'poloniex', 1, trade_id)\n )\n except Exception as e:\n print(\"Poloniex Insert Exception\", e)\n pass\n conn.commit()\n\n return len(new_trade_list)\n\ndef get_foreign_exchange():\n def getPage(url):\n \"\"\"\n url 정보의 내용을 조회한다.\n \"\"\"\n try:\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n content = res.read()\n except:\n content = \"\"\n\n return content\n\n def getExchangeOfNation(soup):\n dicExchange = {}\n\n alpha = '([A-Z]+)'\n\n for item in soup.table('tr')[2:]:\n # 정보 파싱\n nation = item('td')[0].text.strip()\n re_result = re.search(alpha, nation)\n nation = re_result.groups()[0]\n\n basicRateOfExchange = item('td')[1].text # 매매기준환율\n cash_buy = item('td')[2].text # 현찰 살때\n cash_sell = item('td')[3].text # 현찰 팔때\n transfer_send = item('td')[4].text # 송금 보낼 때\n transfer_receive = item('td')[5].text # 송금 받을 때\n\n dicExchange[nation] = {'basicRate': basicRateOfExchange, 'cashBuy': cash_buy, \\\n 'cashSell': cash_sell, 'transferSend': transfer_send,\n 'transferReceive': transfer_receive}\n\n return dicExchange\n\n # naver 환율 페이지 조회\n url = \"http://info.finance.naver.com/marketindex/exchangeList.nhn\"\n\n # page 내용을 조회한다.\n try:\n res = getPage(url)\n\n soup = BeautifulSoup(res, 'html.parser')\n nationExchangeRate = getExchangeOfNation(soup)\n except BaseException as e:\n print(\"get_foreign_exchange - Exception!!! - \", e)\n return\n\n # 최신 정보로 변경\n global recent_foreign_exchange_rate\n new_rate = nationExchangeRate['USD']['basicRate'].replace(',','')\n now = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n print(\"Foreign Exchange Rate Changed - \", new_rate, now)\n recent_foreign_exchange_rate = (new_rate, now)\n\ndef gmail_send():\n conn = conn_pool.get_connection()\n cursor = conn.cursor()\n cursor.execute(select_trade_all_sql)\n rows = cursor.fetchall()\n\n msg_str = \"\"\n btc_price = None\n for row in rows:\n coin_name = str(row[0])\n if coin_name == 'BTC':\n btc_price = str(row[1])\n msg_str += coin_name + \" - Price: \" + str(row[1]) + \" - Date: \" + str(row[2]) + \"
\"\n\n msg_exchange = \"Basic Exchange Rate: \" + recent_foreign_exchange_rate[0] + \" - Date:\" + recent_foreign_exchange_rate[1]\n\n msg_content = '

[Trading Information Collection Status]


{msg_str}

{msg_exchange}'.format(\n msg_str=msg_str,\n msg_exchange=msg_exchange\n )\n message = MIMEText(msg_content, 'html')\n\n message['From'] = 'ManuscriptLink '\n message['To'] = 'Youn-Hee Han '\n message['Subject'] = 'Trading Information Collection Status (BTC: {btc_price})'.format(btc_price=btc_price)\n\n msg_full = message.as_string()\n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(email_account, email_password)\n server.sendmail(email_account, ['support@thinkonweb.com'], msg_full)\n server.quit()\n print(\"Gmail Sent! -\", strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n conn_pool.return_connection(conn)\n\nif __name__ == \"__main__\":\n schedule.every().day.at(\"10:30\").do(gmail_send)\n #schedule.every().minute.do(gmail_send)\n schedule.every().minute.do(get_foreign_exchange)\n # schedule.every().day.at(\"10:30\").do(job)\n # schedule.every(5).to(10).minutes.do(job)\n # schedule.every().monday.do(job)\n # schedule.every().wednesday.at(\"13:15\").do(job)\n\n start_time = datetime.now(timezone.utc)\n\n try:\n while True:\n schedule.run_pending()\n conn = conn_pool.get_connection()\n for coin_name in coin_name_list:\n insert_count_bithumb = get_trade_bithumb(coin_name, conn)\n insert_count_poloniex = get_trade_poloniex(coin_name, conn)\n\n cumulative_bithumb_call_count += 1\n cumulative_poloniex_call_count += 1\n\n print(\"{0:6s}: New Bithumb Trade:{1:3d}, New Poloniex Trade:{2:3d} - {3:s}\".format(\n \"[\" + coin_name + \"]\",\n insert_count_bithumb,\n insert_count_poloniex,\n str(utc_to_asia_seoul(datetime.now(timezone.utc)))\n ))\n sys.stdout.flush()\n\n elapsed_time = (datetime.now(timezone.utc) - start_time).seconds\n print(\" Bithumb API Call Rate: {:5.2f} calls/sec. (It should be less than 20 calls/sec.)\".format(cumulative_bithumb_call_count / elapsed_time))\n print(\"Poloniex API Call Rate: {:5.2f} calls/sec. 
(It should be less than 6 calls/sec.)\".format(cumulative_poloniex_call_count / elapsed_time))\n print()\n\n conn_pool.return_connection(conn)\n except BaseException as e:\n print(e)\n finally:\n print(\"Finally!!!!\")\n conn_pool.close()", "sub_path": "0.Common/3.CoinTrading/trade_info_collector.py", "file_name": "trade_info_collector.py", "file_ext": "py", "file_size_in_byte": 14610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "enum.Enum", "line_number": 36, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 40, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 56, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 73, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 76, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 122, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 123, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlopen", "line_number": 132, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 132, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 132, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 133, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "name"}, {"api_name": "hashlib.sha224", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 181, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 205, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 205, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 205, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 206, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 236, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 236, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 236, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 237, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 266, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 266, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 266, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 267, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 267, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 267, "usage_type": "name"}, {"api_name": "re.search", "line_number": 282, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 304, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 313, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 313, 
"usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 337, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 345, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 350, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 350, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 354, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 356, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 362, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 362, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 362, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 362, "usage_type": "name"}, {"api_name": "schedule.run_pending", "line_number": 366, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 379, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 379, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 379, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 379, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 381, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 381, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 383, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 383, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 383, "usage_type": "name"}]} +{"seq_id": "55576176", "text": "import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nPATH = 'D:/bookPic/'\r\nfilename = 'rada2.jpg'\r\n\r\n#img = cv2.imread('wrongPath/rada2.jpg')\r\nimg = cv2.imread(PATH+filename)\r\nprint(type(img))\r\nif not isinstance(img, np.ndarray):\r\n print('Unsuccessfully load the image \"{}\"'.format(filename))\r\n exit()\r\nRGB_img = cv2.cvtColor(img, cv2.COLOR_B)\r\n#downsize the image to half of the original\r\nnew_width, new_height = int(RGB_img.shape[1]/2), int(RGB_img.shape[0]/2)\r\nRGB_resize = cv2.resize(RGB_img.copy(), (new_width, new_height))\r\n#save the images\r\ncv2.imwrite('C:/test/rada2_resize.jpg',RGB_resize)\r\ncv2.imwrite('C:/test/rada2.jpg', img)\r\n\r\nax1 = plt.subplot(1, 3, 1)\r\nplt.imshow(img)\r\nax1.set_title('img')\r\nax2 = plt.subplot(1, 3, 2, yticklabels = [])\r\nplt.imshow(RGB_img)\r\nax2.set_title('RGB_img')\r\nax3 = plt.subplot(1, 3, 3, yticklabels = [])\r\nplt.imshow(RGB_resize)\r\nax3.set_title('RGB_resize')\r\nplt.show()", "sub_path": "ex 3.3.py", "file_name": "ex 3.3.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_B", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "601031311", "text": "from dataLoaders import DigitData\nfrom catalyst.dl import SupervisedRunner, CallbackOrder, Callback, CheckpointCallback\nfrom config import *\nfrom funcs import get_dict_from_class\nfrom models import FeatureExtractor,FCLayered\nfrom losses import BCELoss\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom catalyst import dl\nfrom callbacks import MetricsCallback\nfrom sklearn.model_selection import StratifiedKFold\nimport torch\ndef train(Model1,DataLoad1):\n randSeed=23\n data_load = DigitData(**get_dict_from_class(DataLoad1))\n criterion = BCELoss()\n model = FeatureExtractor(**get_dict_from_class(Model1))\n # model = FCLayered(**get_dict_from_class(Model1))\n if False:\n checkpoint = torch.load(str(saveDirectory) + '/featureExtr_4_100.pth')\n model.load_state_dict(checkpoint)\n model.eval()\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n skf = StratifiedKFold(n_splits=15, shuffle=True, random_state=randSeed)\n train = data_load.data\n train[\"fold\"] = -1\n\n # train.set_index('index',inplace=True)\n for fold_id, (train_index, val_index) in enumerate(skf.split(train, train[\"fold\"])):\n train.iloc[val_index, -1] = fold_id\n\n # # check the proportion\n fold_proportion = pd.pivot_table(train, columns=\"fold\", values=\"label\", aggfunc=len)\n\n use_fold = 0\n\n train_file = train.query(\"fold != @use_fold\")\n val_file = train.query(\"fold == @use_fold\")\n\n print(\"[fold {}] train: {}, val: {}\".format(use_fold, len(train_file), len(val_file)))\n\n loaders = {\n \"train\": DataLoader(DigitData(data_frame=train_file, **get_dict_from_class(DataLoad1)),\n batch_size=512,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n drop_last=False),\n \"valid\": DataLoader(DigitData(data_frame=val_file, **get_dict_from_class(DataLoad1)),\n batch_size=512,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n drop_last=False)\n }\n\n callbacks = [\n dl.AccuracyCallback(input_key=\"logits\", target_key=\"targets\", num_classes=10, topk_args=[1]),\n\n MetricsCallback(input_key=\"targets\", output_key=\"logits\",\n directory=saveDirectory, model_name='featureExtr_4'),\n # CheckpointCallback(save_n_best=0)\n ]\n runner = SupervisedRunner(\n\n output_key=\"logits\",\n input_key=\"image_pixels\",\n target_key=\"targets\")\n # scheduler=scheduler,\n\n runner.train(\n model=model,\n criterion=criterion,\n loaders=loaders,\n optimizer=optimizer,\n\n num_epochs=epoch,\n verbose=True,\n logdir=f\"fold0\",\n callbacks=callbacks,\n )\n\n # main_metric = \"epoch_f1\",\n # 
minimize_metric = False\n c = 0\nif __name__ == \"__main__\":\n train(Model1,DataLoad1)", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dataLoaders.DigitData", "line_number": 16, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 16, "usage_type": "call"}, {"api_name": "losses.BCELoss", "line_number": 17, "usage_type": "call"}, {"api_name": "models.FeatureExtractor", "line_number": 18, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 24, "usage_type": "name"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.pivot_table", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "dataLoaders.DigitData", "line_number": 45, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "dataLoaders.DigitData", "line_number": 51, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 51, "usage_type": "call"}, {"api_name": "catalyst.dl.AccuracyCallback", "line_number": 60, "usage_type": "call"}, {"api_name": "catalyst.dl", "line_number": 60, "usage_type": "name"}, {"api_name": "callbacks.MetricsCallback", "line_number": 62, "usage_type": "call"}, {"api_name": "catalyst.dl.SupervisedRunner", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "496942654", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jla', '0003_auto_20160822_1013'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='colaboradores',\n name='colnivel',\n field=models.IntegerField(null=True, db_column='ColNivel', blank=True),\n ),\n migrations.AlterModelTable(\n name='recibos',\n table='jla_recibos',\n ),\n ]\n", "sub_path": "jla/migrations/0004_auto_20160823_0638.py", "file_name": "0004_auto_20160823_0638.py", "file_ext": "py", "file_size_in_byte": 548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelTable", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "284991198", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
build/bdist.linux-i686/egg/evogrid/caching/ram.py\n# Compiled at: 2006-08-10 15:57:20\n\"\"\"RAM-based cache implementation\n\nThis RAM cache is inspired on zope.app.cache.ram but a bit simpler cause we\ndon't want to inherit from ``Persistent`` and has a slightly different\ninterface as well.\n\nThe original implementation of RAMCache is copyright Zope Corporation and\ncontributors and is distributed under the terms of the Zope Public License.\n\"\"\"\nfrom cPickle import dumps\nfrom evogrid.caching.interfaces import ICache\nfrom threading import Lock\nfrom zope.interface import implements\n_marker = object()\n\nclass RAMCache(object):\n \"\"\"Cache implementation that stores entries in a python dict\"\"\"\n __module__ = __name__\n implements(ICache)\n hits = 0\n misses = 0\n max_entries = None\n\n def __init__(self, max_entries=None):\n self.max_entries = max_entries\n self._store = {}\n self._sorted_keys = []\n self._lock = Lock()\n\n def __len__(self):\n return len(self._store)\n\n def invalidate(self, key=None):\n if key is None:\n self._lock.acquire()\n try:\n self._store.clear()\n del self._sorted_keys[:]\n finally:\n self._lock.release()\n else:\n key = self._buildKey(key)\n if key not in self._store:\n return\n self._lock.acquire()\n try:\n if key in self._store:\n del self._store[key]\n self._sorted_keys.remove(key)\n finally:\n self._lock.release()\n return\n\n def query(self, key, default=None):\n \"\"\"Search the store to find a matching entry\n\n If nothing is found return default. If a matching entry is found,\n the _sorted_keys list order is updated. The misses and hits counters\n are updated.\n \"\"\"\n key = self._buildKey(key)\n _store, _sorted_keys = self._store, self._sorted_keys\n result = _store.get(key, _marker)\n if result is _marker:\n self.misses += 1\n return default\n self._lock.acquire()\n try:\n if key in _store:\n _sorted_keys.remove(key)\n _sorted_keys.insert(0, key)\n finally:\n self._lock.release()\n self.hits += 1\n return result\n\n def set(self, key, data):\n \"\"\"Add data to the store\n\n Check that the store size does not exceed ``max_entries``.\n \"\"\"\n key = self._buildKey(key)\n _store, _sorted_keys = self._store, self._sorted_keys\n if key in _store and _store[key] == data:\n return\n self._lock.acquire()\n try:\n if key not in _store:\n len_self = len(self)\n max_entries = self.max_entries\n if max_entries is not None and len_self >= max_entries:\n for i in xrange(len_self - max_entries + 1):\n del _store[_sorted_keys.pop()]\n\n _store[key] = data\n _sorted_keys.insert(0, key)\n finally:\n self._lock.release()\n return\n\n def _buildKey(kw):\n \"\"\"Build a tuple which can be used as an index for a cached value\"\"\"\n k = tuple(sorted(kw.iteritems()))\n try:\n return hash(k)\n except TypeError:\n return dumps(k)\n\n _buildKey = staticmethod(_buildKey)", "sub_path": "pycfiles/evogrid-0.1.0-py2.4/ram.py", "file_name": "ram.py", "file_ext": "py", "file_size_in_byte": 3622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "zope.interface.implements", "line_number": 25, "usage_type": "call"}, {"api_name": "evogrid.caching.interfaces.ICache", "line_number": 25, "usage_type": "argument"}, {"api_name": "threading.Lock", "line_number": 34, "usage_type": "call"}, {"api_name": "cPickle.dumps", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "105357986", "text": "import boto3\nimport os\n\nec2 = boto3.resource('ec2')\ninstance_name = os.environ['instance_name']\n\ndef 
lambda_handler(event, context):\n print ('Enters into the function')\n instances = ec2.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [instance_name]}]).stop()", "sub_path": "stop_instance_by_tagname.py", "file_name": "stop_instance_by_tagname.py", "file_ext": "py", "file_size_in_byte": 279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "boto3.resource", "line_number": 4, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "7599653", "text": "import pickle\r\nfrom io import StringIO\r\n\r\nfrom flask import Flask, request, make_response, Response, send_file\r\n\r\nfrom flasgger import Swagger\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics import accuracy_score\r\nimport pickle\r\n\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\napp = Flask(__name__)\r\nSwagger(app)\r\n\r\npickle_in = open(\"classifier.pkl\", \"rb\")\r\nclassifier = pickle.load(pickle_in)\r\npicky_in = open(\"cluster.pkl\", \"rb\")\r\nclusts = pickle.load(picky_in)\r\n\r\n\r\n@app.route('/')\r\ndef welcome():\r\n return \"Welcome All\"\r\n\r\n\r\n@app.route('/predict_file', methods=[\"POST\"])\r\ndef predict_note_file1():\r\n \"\"\"Let's Authenticate the Banks Note\r\n This is using docstrings for specifications.\r\n ---\r\n parameters:\r\n - name: file\r\n in: formData\r\n type: file\r\n required: true\r\n\r\n responses:\r\n 200:\r\n description: The output values\r\n\r\n \"\"\"\r\n df_test = pd.read_csv(request.files.get(\"file\"))\r\n print(df_test.head())\r\n prediction = classifier.predict(df_test)\r\n\r\n return str(list(prediction))\r\n\r\n\r\n@app.route('/predict_similar', methods=[\"POST\"])\r\ndef predict_note_file():\r\n \"\"\"Let's Cluster the Test cases for similarity Level\r\n This is using docstrings for specifications.\r\n ---\r\n parameters:\r\n - name: file\r\n in: formData\r\n type: file\r\n required: true\r\n\r\n responses:\r\n 200:\r\n description: The output values\r\n\r\n \"\"\"\r\n df_test = pd.read_csv(request.files.get(\"file\"), encoding='unicode_escape')\r\n df_test = df_test.dropna(axis=0, how='any')\r\n df_test['combine6'] = df_test.iloc[:, 1] + df_test.iloc[:, 2] + df_test.iloc[:, 3]\r\n vec = TfidfVectorizer(stop_words=\"english\", ngram_range=(1, 3))\r\n vec.fit(df_test.combine6.values)\r\n features = vec.transform(df_test.combine6.values)\r\n\r\n clustr = KMeans(init='k-means++', n_clusters=5, n_init=10)\r\n clustr.fit(features)\r\n df_test['cluster_labels'] = clustr.labels_\r\n output = StringIO()\r\n df_test.to_csv(output)\r\n return Response(output.getvalue(), mimetype=\"text/csv\")\r\n # return \"Check the file is generated\"\r\n # --resp = make_response(df_test.to_csv())\r\n # resp.headers[\"Content-Disposition\"] = (\"attachment; filename=%s\" % filename)\r\n # resp.headers[\"Content-Disposition\"] = \"attachment; filename=export.csv\"\r\n # --resp.headers[\"Content-Type\"] = \"text/csv\"\r\n # resp.headers[\"Content-Disposition\"] = (\"attachment; filename=%s\" % filename)\r\n # --return resp\r\n # & buffer = StringIO()\r\n # & df_test.to_csv(buffer, encoding='utf-8')\r\n # & buffer.seek(0)\r\n # & return send_file(buffer, attachment_filename=\"test.csv\", mimetype='text/csv')\r\n\r\n\r\n# return make_response(df_test.to_csv(), mimetype=\"text/csv\")\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", 
"file_size_in_byte": 2846, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "flasgger.Swagger", "line_number": 16, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.files.get", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.files.get", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 75, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "135489843", "text": "# ------------------------------------------------------------------------------------------------ #\n# MIT License #\n# #\n# Copyright (c) 2020, Microsoft Corporation #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #\n# and associated documentation files (the \"Software\"), to deal in the Software without #\n# restriction, including without limitation the rights to use, copy, modify, merge, publish, #\n# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in all copies or #\n# substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#\n# ------------------------------------------------------------------------------------------------ #\n\nfrom functools import partial\nfrom collections import namedtuple\n\nimport jax\nimport jax.numpy as jnp\nimport haiku as hk\nfrom gym.spaces import Discrete, Box\n\nfrom .._base.test_case import TestCase\nfrom ..utils import safe_sample\nfrom .stochastic_v import StochasticV\n\ndiscrete = Discrete(7)\nboxspace = Box(low=0, high=1, shape=(3, 5))\nnum_bins = 20\n\nEnv = namedtuple('Env', ('observation_space', 'action_space'))\n\n\ndef func(S, is_training):\n batch_norm = hk.BatchNorm(False, False, 0.99)\n logits = hk.Sequential((\n hk.Flatten(),\n hk.Linear(8), jax.nn.relu,\n partial(hk.dropout, hk.next_rng_key(), 0.25 if is_training else 0.),\n partial(batch_norm, is_training=is_training),\n hk.Linear(8), jnp.tanh,\n hk.Linear(num_bins),\n ))\n return {'logits': logits(S)}\n\n\nclass TestStochasticV(TestCase):\n def test_init(self):\n StochasticV(func, Env(boxspace, boxspace), (-10, 10), num_bins=num_bins)\n StochasticV(func, Env(boxspace, discrete), (-10, 10), num_bins=num_bins)\n StochasticV(func, Env(discrete, boxspace), (-10, 10), num_bins=num_bins)\n StochasticV(func, Env(discrete, discrete), (-10, 10), num_bins=num_bins)\n\n # test_call_* ##################################################################################\n\n def test_call_discrete(self):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_, logp = v(s, return_logp=True)\n print(v_, logp, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n self.assertArraySubdtypeFloat(logp)\n self.assertArrayShape(logp, ())\n\n def test_call_boxspace(self):\n env = Env(boxspace, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_, logp = v(s, return_logp=True)\n print(v_, logp, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n self.assertArraySubdtypeFloat(logp)\n self.assertArrayShape(logp, ())\n\n # test_mode_* ##################################################################################\n\n def test_mode_discrete(self):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_ = v.mode(s)\n print(v_, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n\n def test_mode_boxspace(self):\n env = Env(boxspace, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_ = v.mode(s)\n print(v_, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n\n def test_function_state(self):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n print(v.function_state)\n batch_norm_avg = v.function_state['batch_norm/~/mean_ema']['average']\n self.assertArrayShape(batch_norm_avg, (1, 8))\n self.assertArrayNotEqual(batch_norm_avg, jnp.zeros_like(batch_norm_avg))\n\n # other tests ##################################################################################\n\n def test_bad_input_signature(self):\n def badfunc(S, is_training, x):\n pass\n msg = (\n r\"func has bad 
signature; \"\n r\"expected: func\\(S, is_training\\), \"\n r\"got: func\\(S, is_training, x\\)\"\n )\n with self.assertRaisesRegex(TypeError, msg):\n env = Env(boxspace, discrete)\n value_range = (-10, 10)\n StochasticV(badfunc, env, value_range, num_bins=num_bins, random_seed=13)\n\n def test_bad_output_structure(self):\n def badfunc(S, is_training):\n dist_params = func(S, is_training)\n dist_params['foo'] = jnp.zeros(1)\n return dist_params\n msg = (\n r\"func has bad return tree_structure, \"\n r\"expected: PyTreeDef\\({'logits': \\*}\\), \"\n r\"got: PyTreeDef\\({'foo': \\*, 'logits': \\*}\\)\"\n )\n with self.assertRaisesRegex(TypeError, msg):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n StochasticV(badfunc, env, value_range, num_bins=num_bins, random_seed=13)\n", "sub_path": "coax/_core/stochastic_v_test.py", "file_name": "stochastic_v_test.py", "file_ext": "py", "file_size_in_byte": 6723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gym.spaces.Discrete", "line_number": 34, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 38, "usage_type": "call"}, {"api_name": "haiku.BatchNorm", "line_number": 42, "usage_type": "call"}, {"api_name": "haiku.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "haiku.Flatten", "line_number": 44, "usage_type": "call"}, {"api_name": "haiku.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "jax.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 46, "usage_type": "call"}, {"api_name": "haiku.dropout", "line_number": 46, "usage_type": "attribute"}, {"api_name": "haiku.next_rng_key", "line_number": 46, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 47, "usage_type": "call"}, {"api_name": "haiku.Linear", "line_number": 48, "usage_type": "call"}, {"api_name": "jax.numpy.tanh", "line_number": 48, "usage_type": "attribute"}, {"api_name": "jax.numpy", "line_number": 48, "usage_type": "name"}, {"api_name": "haiku.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "_base.test_case.TestCase", "line_number": 54, "usage_type": "name"}, {"api_name": "stochastic_v.StochasticV", "line_number": 56, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 57, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 58, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 67, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 68, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 80, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 81, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 95, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 96, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 106, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 107, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 111, 
"usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 117, "usage_type": "call"}, {"api_name": "jax.numpy.zeros_like", "line_number": 122, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 122, "usage_type": "name"}, {"api_name": "stochastic_v.StochasticV", "line_number": 137, "usage_type": "call"}, {"api_name": "jax.numpy.zeros", "line_number": 142, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 142, "usage_type": "name"}, {"api_name": "stochastic_v.StochasticV", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "430165915", "text": "import SamTech\nimport cv2\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam, SGD\n\nfrom numpy.random import seed\nseed(42)\nfrom tensorflow import set_random_seed\nset_random_seed(2)\nfrom sklearn.metrics import classification_report, confusion_matrix\n\norigin_dir = 'SamTech/Vision/dataset/lesion/test/'\n# origin_dir = 'SamTech/Vision/dataset/lesion/train_2c/'\n\nsammy = SamTech.Vision.Classifier(category = 'lesion')\n# sammy.weight_path = 'SamTech/Vision/ckpt/'\nsammy.classes = ['BENIGN', 'MALIGNANT']\nsammy.classes.sort()\n\n\nsammy.load_model('inceptionResnetV2_lesion-320-320-3-c2-30-0.73.hdf5')\n\n\ntest_list = ['ML16','NM_small','ML17']\n# test_list = ['MALIGNANT','BENIGN']\n# test_list.pop(0)\ny_true = []\ny_pred = []\n\n\nfor folder in test_list:\n print ('x')\n _dir = origin_dir+folder\n for i in os.scandir(_dir):\n if 'NM' in folder:\n y_true.append(0)\n act = \"BENIGN\"\n elif 'ML' in folder:\n y_true.append(1)\n act = \"MELANOMAS\"\n\n raw_prediction = sammy.predict(_dir+'/'+i.name)[0]\n\n if raw_prediction[0]>=0.5:\n _class = 'BENIGN'\n y_pred.append(0)\n else:\n _class = 'MALIGNANT'\n y_pred.append(1)\n print ('BENIGN {}% MALIGNANT {}% {} , actually {}'.format(int(raw_prediction[0]*100),int(raw_prediction[1]*100),_class,act))\nprint (confusion_matrix(y_true,y_pred))\nprint (classification_report(y_true, y_pred, target_names=['BENIGN', 'MALIGNANT']))\n", "sub_path": "predict_boost.py", "file_name": "predict_boost.py", "file_ext": "py", "file_size_in_byte": 1539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 10, "usage_type": "call"}, {"api_name": "SamTech.Vision.Classifier", "line_number": 16, "usage_type": "call"}, {"api_name": "SamTech.Vision", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.scandir", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "38174237", "text": "import pygame\n\n\nclass Scene:\n\n def __init__(self, screen, backgroundColor):\n self.screen = screen\n self.r = backgroundColor[0]\n self.g = backgroundColor[1]\n self.b = backgroundColor[2]\n self.gameObjects = []\n self.state = 0\n self.crazy = True\n self.rDirection = 1\n self.gDirection = 1\n self.bDirection = 1\n\n def add(self, game_object):\n self.gameObjects.append(game_object)\n\n def update(self, velocity):\n if self.crazy:\n self.process_background()\n for obj in self.gameObjects:\n obj.update(velocity, self)\n\n def draw(self):\n self.screen.fill((self.r, self.g, self.b))\n for obj in self.gameObjects:\n obj.draw(self.screen)\n\n def flip(self):\n 
pygame.display.flip()\n\n    def process_background(self):\n        self.r += 2 * self.rDirection\n        self.g += 1 * self.gDirection\n        self.b += 3 * self.bDirection\n\n        if self.r >= 255:\n            self.r = 255\n            self.rDirection = -1\n        elif self.r <= 0:\n            self.r = 0\n            self.rDirection = 1\n        if self.g >= 255:\n            self.g = 255\n            self.gDirection = -1\n        elif self.g <= 0:\n            self.g = 0\n            self.gDirection = 1\n        if self.b >= 255:\n            self.b = 255\n            self.bDirection = -1\n        elif self.b <= 0:\n            self.b = 0\n            self.bDirection = 1\n", "sub_path": "src/scene.py", "file_name": "scene.py", "file_ext": "py", "file_size_in_byte": 1466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.display.flip", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 33, "usage_type": "attribute"}]}
+{"seq_id": "30365789", "text": "import pytest\nfrom flask import url_for\n\nfrom trends.utils.feed_request import FeedRequest\n\n\ndef test_feed_request():\n    params = {\"offset\": 0, \"limit\": 20, \"tag\": \"blogger\"}\n    fr = FeedRequest()\n    resp = fr.get_response(**params)\n    assert resp.status_code == 200\n    return resp\n\n\ndef test_feed_proxy(client):\n    params = {\"offset\": 0, \"limit\": 20, \"tag\": \"blogger\"}\n    resp = client.get(url_for(\"trends.feed_proxy\", **params),)\n    assert resp.status_code == 200\n\n    results = resp.json[\"data\"]\n    assert len(results) == len(test_feed_request().json())\n    # assert results['items'] == test_feed_request().json()['items'] # can't\n    # verify this, because the fields keep changing their order\n\n\n@pytest.mark.parametrize(\n    (\"params\", \"status\"),\n    [\n        ({\"offset\": \"0\", \"limit\": \"20\", \"tag\": \"bla-bla\",}, 200), # nonexistent tag\n        ({\"limit\": \"20\", \"tag\": \"blogger\",}, 200), # not offset\n        ({\"offset\": \"0\", \"tag\": \"blogger\",}, 200), # not limit\n        ({\"offset\": \"0\", \"limit\": \"20\",}, 200), # not tag\n        ({\"offset\": \"1256985555\", \"limit\": \"20\", \"tag\": \"blogger\",}, 200),\n    ],\n)\ndef test_feed_proxy_bad_request(client, params, status):\n    resp = client.get(url_for(\"trends.feed_proxy\", **params),)\n    assert resp.status_code == status\n", "sub_path": "backend/web/tests/api/test_feed_proxy.py", "file_name": "test_feed_proxy.py", "file_ext": "py", "file_size_in_byte": 1330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "trends.utils.feed_request.FeedRequest", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}]}
+{"seq_id": "203664804", "text": "from itertools import permutations\n\nword = input(\"Enter String : \")\nperms = [''.join(p) for p in permutations(word)]\n#print (perms)\nlist2 = []\noptions = ([x for x in input(\"Enter Value to list :\").split()])\n#print (options)\n\nfor i in options:\n    if i in perms:\n        list2.append(i)\n\nprint (list2)\n", "sub_path": "Exercises/Deepak/Collections and Iterations/p8.py", "file_name": "p8.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "itertools.permutations", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "307296796", "text": "# -*- 
coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2020, David Swarbrick.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Tests for image resizing based on filesize.\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\n\nimport unittest\nimport os\n\nfrom test import _common\nfrom test.helper import TestHelper\nfrom beets.util import syspath\nfrom beets.util.artresizer import (\n pil_resize,\n im_resize,\n get_im_version,\n get_pil_version,\n)\n\n\nclass ArtResizerFileSizeTest(_common.TestCase, TestHelper):\n \"\"\"Unittest test case for Art Resizer to a specific filesize.\"\"\"\n\n IMG_225x225 = os.path.join(_common.RSRC, b\"abbey.jpg\")\n IMG_225x225_SIZE = os.stat(syspath(IMG_225x225)).st_size\n\n def setUp(self):\n \"\"\"Called before each test, setting up beets.\"\"\"\n self.setup_beets()\n\n def tearDown(self):\n \"\"\"Called after each test, unloading all plugins.\"\"\"\n self.teardown_beets()\n\n def _test_img_resize(self, resize_func):\n \"\"\"Test resizing based on file size, given a resize_func.\"\"\"\n # Check quality setting unaffected by new parameter\n im_95_qual = resize_func(\n 225,\n self.IMG_225x225,\n quality=95,\n max_filesize=0,\n )\n # check valid path returned - max_filesize hasn't broken resize command\n self.assertExists(im_95_qual)\n\n # Attempt a lower filesize with same quality\n im_a = resize_func(\n 225,\n self.IMG_225x225,\n quality=95,\n max_filesize=0.9 * os.stat(syspath(im_95_qual)).st_size,\n )\n self.assertExists(im_a)\n # target size was achieved\n self.assertLess(os.stat(syspath(im_a)).st_size,\n os.stat(syspath(im_95_qual)).st_size)\n\n # Attempt with lower initial quality\n im_75_qual = resize_func(\n 225,\n self.IMG_225x225,\n quality=75,\n max_filesize=0,\n )\n self.assertExists(im_75_qual)\n\n im_b = resize_func(\n 225,\n self.IMG_225x225,\n quality=95,\n max_filesize=0.9 * os.stat(syspath(im_75_qual)).st_size,\n )\n self.assertExists(im_b)\n # Check high (initial) quality still gives a smaller filesize\n self.assertLess(os.stat(syspath(im_b)).st_size,\n os.stat(syspath(im_75_qual)).st_size)\n\n @unittest.skipUnless(get_pil_version(), \"PIL not available\")\n def test_pil_file_resize(self):\n \"\"\"Test PIL resize function is lowering file size.\"\"\"\n self._test_img_resize(pil_resize)\n\n @unittest.skipUnless(get_im_version(), \"ImageMagick not available\")\n def test_im_file_resize(self):\n \"\"\"Test IM resize function is lowering file size.\"\"\"\n self._test_img_resize(im_resize)\n\n\ndef suite():\n \"\"\"Run this suite of tests.\"\"\"\n return unittest.TestLoader().loadTestsFromName(__name__)\n\n\nif __name__ == \"__main__\":\n unittest.main(defaultTest=\"suite\")\n", "sub_path": "test/test_art_resize.py", "file_name": "test_art_resize.py", "file_ext": "py", "file_size_in_byte": 3566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "test._common.TestCase", "line_number": 35, 
"usage_type": "attribute"}, {"api_name": "test._common", "line_number": 35, "usage_type": "name"}, {"api_name": "test.helper.TestHelper", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "test._common.RSRC", "line_number": 38, "usage_type": "attribute"}, {"api_name": "test._common", "line_number": 38, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 39, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 39, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 66, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 66, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 70, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 70, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 71, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 71, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 86, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 86, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 90, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 90, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 91, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 91, "usage_type": "call"}, {"api_name": "beets.util.artresizer.pil_resize", "line_number": 96, "usage_type": "argument"}, {"api_name": "unittest.skipUnless", "line_number": 93, "usage_type": "call"}, {"api_name": "beets.util.artresizer.get_pil_version", "line_number": 93, "usage_type": "call"}, {"api_name": "beets.util.artresizer.im_resize", "line_number": 101, "usage_type": "argument"}, {"api_name": "unittest.skipUnless", "line_number": 98, "usage_type": "call"}, {"api_name": "beets.util.artresizer.get_im_version", "line_number": 98, "usage_type": "call"}, {"api_name": "unittest.TestLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "615800104", "text": "from flask import Flask, jsonify, request, render_template\nfrom flask_cors import CORS, cross_origin\nimport requests, json\n\napp = Flask(__name__)\ncors = CORS(app)\n\n# SORRY UMBRA PLEASE DONT HURT ME\n\n@app.route('/allsongs')\ndef all_songs():\n\tall_songs_url = \"https://www.scoresaber.com/api.php?function=get-leaderboards&cat=3&page=1&limit=1000\"\n\tr = requests.get(all_songs_url)\n\treturn r.text\n\t\n@app.route('/history')\ndef user():\n\tuser_id = request.args.get('user_id', '')\n\t\n\tuser_history_url = \"https://new.scoresaber.com/api/player/\"+user_id+\"/scores/top/\"\t\n\tuser_profile_url = \"https://new.scoresaber.com/api/player/\"+user_id+\"/full\"\n\tcurrent_page = 1\n\tdata = []\n\t\n\tlast_is_ranked = True\n\twhile last_is_ranked:\n\t\t# print(\"Fetching page\", current_page)\n\t\tr = requests.get(user_history_url+str(current_page))\n\t\tdata.extend(json.loads(r.text)[\"scores\"])\n\n\t\tif data[-1][\"pp\"] == 0:\n\t\t\tlast_is_ranked = False\n\n\t\tcurrent_page += 1\n\n\t# remove non-ranked elements, veeeery lazyly\n\tdata = list(filter(lambda x: x[\"pp\"] > 0, data))\n\taggregate_data = {}\n\taggregate_data[\"scores\"] = data\n\tr = requests.get(user_profile_url)\n\taggregate_data[\"profile\"] = json.loads(r.text)\n\n\treturn jsonify(aggregate_data)\n\n@app.route('/')\ndef home():\n\treturn 
render_template('index.html')\n\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=80)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "366396052", "text": "import json\n\nd = dict()\ncount=1\nwith open(\"infytq.txt\") as f:\n while(True):\n query = f.readline()\n print(query)\n if query != '':\n reply = f.readline()\n f.readline()\n d[count] = {\"query\":query,\"reply\":reply}\n count+=1\n else:\n break\nf = open(\"data.json\",\"w+\")\nf.write(json.dumps(d))\nf.close()\n", "sub_path": "anotherTry/query_reply.py", "file_name": "query_reply.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.dumps", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "25620921", "text": "from Acquisition import aq_inner\nfrom five import grok\nfrom plone import api\nfrom zope.component.hooks import getSite\n\nfrom pressapp.presscontent.pressinvitation import IPressInvitation\nfrom pressapp.presscontent.interfaces import IPressContent\n\n\nclass PrepareRelease(grok.View):\n grok.context(IPressContent)\n grok.require('cmf.ModifyPortalContent')\n grok.name('prepare-release')\n\n def update(self):\n self.recipient_count = len(self.recipient_list())\n self.has_recipients = self.recipient_count > 0\n self.subscriber_count = len(self.subscriber_list())\n self.has_subscribers = self.subscriber_count > 0\n\n def is_administrator(self):\n context = aq_inner(self.context)\n is_admin = False\n admin_roles = ('Site Administrator', 'Manager')\n user = api.user.get_current()\n roles = api.user.get_roles(username=user.getId(), obj=context)\n for role in roles:\n if role in admin_roles:\n is_admin = True\n return is_admin\n\n def is_pressinvitation(self):\n context = aq_inner(self.context)\n return IPressInvitation.providedBy(context)\n\n def recipient_list(self):\n context = aq_inner(self.context)\n recipients = getattr(context, 'recipients', '')\n return recipients\n\n def subscriber_list(self):\n portal = getSite()\n presscenter = portal['presscenter']\n subscribers = getattr(presscenter, 'subscribers', '')\n return subscribers\n\n def reformat_recipients(self, item):\n item = item.split(',', 1)\n return item\n\n def has_channel_info(self):\n context = aq_inner(self.context)\n channel = getattr(context, 'channel', None)\n if channel:\n return True\n\n def has_recipients_info(self):\n context = aq_inner(self.context)\n recipients = getattr(context, 
'recipients', None)\n if recipients:\n return True\n", "sub_path": "src/pressapp.presscontent/pressapp/presscontent/preparerelease.py", "file_name": "preparerelease.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "five.grok.View", "line_number": 10, "usage_type": "attribute"}, {"api_name": "five.grok", "line_number": 10, "usage_type": "name"}, {"api_name": "five.grok.context", "line_number": 11, "usage_type": "call"}, {"api_name": "pressapp.presscontent.interfaces.IPressContent", "line_number": 11, "usage_type": "argument"}, {"api_name": "five.grok", "line_number": 11, "usage_type": "name"}, {"api_name": "five.grok.require", "line_number": 12, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 12, "usage_type": "name"}, {"api_name": "five.grok.name", "line_number": 13, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 13, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 22, "usage_type": "call"}, {"api_name": "plone.api.user.get_current", "line_number": 25, "usage_type": "call"}, {"api_name": "plone.api.user", "line_number": 25, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 25, "usage_type": "name"}, {"api_name": "plone.api.user.get_roles", "line_number": 26, "usage_type": "call"}, {"api_name": "plone.api.user", "line_number": 26, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 26, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 33, "usage_type": "call"}, {"api_name": "pressapp.presscontent.pressinvitation.IPressInvitation.providedBy", "line_number": 34, "usage_type": "call"}, {"api_name": "pressapp.presscontent.pressinvitation.IPressInvitation", "line_number": 34, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 37, "usage_type": "call"}, {"api_name": "zope.component.hooks.getSite", "line_number": 42, "usage_type": "call"}, {"api_name": "Acquisition.aq_inner", "line_number": 52, "usage_type": "call"}, {"api_name": "Acquisition.aq_inner", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "486789065", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib/python2.7/site-packages/weborg/lib/client.py\n# Compiled at: 2011-08-01 04:14:42\nimport subprocess, os, logging, re, memcache, settings\nlog = logging.getLogger('client')\nlog.setLevel(level=logging.DEBUG)\nhandler = logging.FileHandler(settings.LOG_FILE)\nhandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\nlog.addHandler(handler)\nmc = memcache.Client(['localhost:11211'])\n\ndef execute(cmd):\n env = os.environ\n env['LANG'] = 'en_US.utf8'\n full_cmd = settings.EMACS + ' -q -batch -l ~/.emacs.d/70-org-mode.el -l ' + settings.ORG_EL + \" -eval '%s'\" % cmd.encode('utf-8')\n p = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)\n stdout, stderr = p.communicate()\n log.debug('RPC: %s' % full_cmd)\n log.debug('Result: %s' % stdout)\n return stdout\n\n\ndef entry_index():\n idx = mc.get('idx')\n if idx:\n return idx\n else:\n cmd = '(entry-index)'\n idx = execute(cmd)\n mc.set('idx', idx)\n return idx\n\n\ndef entry_create(eid, jsonstr):\n cmd = '(entry-create \"%s\" \"%s\")' % (eid, re.escape(jsonstr))\n return execute(cmd)\n\n\ndef entry_new(eid):\n cmd 
= '(entry-new \"%s\")' % eid\n return execute(cmd)\n\n\ndef entry_update(eid, jsonstr):\n cmd = '(entry-update \"%s\" \"%s\")' % (eid, re.escape(jsonstr))\n return execute(cmd)\n\n\ndef entry_delete(eid):\n cmd = '(entry-delete \"%s\")' % eid\n return execute(cmd)\n\n\ndef entry_show(eid):\n cache = mc.get(str(eid))\n if cache:\n return cache\n else:\n cmd = '(entry-show \"%s\")' % eid\n result = execute(cmd)\n mc.set(str(eid), result)\n return result\n\n\ndef entry_edit(eid):\n cmd = '(entry-edit \"%s\")' % eid\n return execute(cmd)", "sub_path": "pycfiles/WebOrg-0.2.3dev.linux-i686.tar/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 10, "usage_type": "call"}, {"api_name": "settings.LOG_FILE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 11, "usage_type": "call"}, {"api_name": "memcache.Client", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "settings.EMACS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "settings.ORG_EL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 19, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "re.escape", "line_number": 38, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "612456646", "text": "import boto3\nfrom botocore.exceptions import ClientError\n\nclass DatabaseClient(object):\n def __init__(self, config):\n self._config = config\n self.connect(self._config)\n \n def connect(self, config):\n self.dynamodb = boto3.resource('dynamodb')\n \n def insert(self, table_name, data):\n table = self.dynamodb.Table(table_name)\n response = table.put_item(Item=data)\n return response\n\n def get(self, table_name, id):\n table = self.dynamodb.Table(table_name)\n scan_kwarg = {\n 'FilterExpression': 'user_id = :val',\n 'ExpressionAttributeValues': {':val': id}\n }\n done = False\n start_key = None\n rows = []\n i = 0\n while not done:\n i+=1\n if start_key:\n scan_kwarg['ExclusiveStartKey'] = start_key\n response = table.scan(**scan_kwarg)\n rows = rows + response.get('Items', [])\n start_key = response.get('LastEvaulatedKey', None)\n done = start_key is None\n return rows\n \n def delete_items(self, ids):\n response = [delete_item(id) for id in ids]\n return response\n\n def delete_item(table_name, id):\n table = self.dynamodb.Table(table_name)\n \n try:\n table.delete_item(\n Key = {\n 'id': id\n }\n )\n except ClientError as e:\n print(\"Error: \")\n print(e)\n return False\n else:\n return response", "sub_path": "backend/dynamodb/build/lib/dynamodb/database_client.py", "file_name": "database_client.py", "file_ext": "py", "file_size_in_byte": 1558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "boto3.resource", "line_number": 10, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "168393493", "text": "import pygame.ftfont\nimport 
pygame\npygame.init()\npygame.mixer.init()\n\nclass Music_button:\n def __init__(self, screen):\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.rect = pygame.Rect(0, 600, 67, 71)\n self.image=pygame.image.load('images/Sound.png')\n self.BGM_music=pygame.mixer.music.load(\"music/BGM.mp3\")\n self.Music_play=True\n self.music_continuous=True\n\n def music_play(self):\n if self.Music_play==True and self.music_continuous==True:\n pygame.mixer.music.play()\n self.music_continuous=False\n\n def Music_button_play(self):\n if self.Music_play==False:\n self.Music_play=True\n self.music_continuous=True\n else:\n self.Music_play=False\n pygame.mixer_music.pause()\n\n def blit(self):\n self.screen.blit(self.image,self.rect)\n\n", "sub_path": "GAME/music_button.py", "file_name": "music_button.py", "file_ext": "py", "file_size_in_byte": 893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mixer_music.pause", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.mixer_music", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "381817922", "text": "''' create_unique_drug_order_txt.py '''\n\nimport sqlalchemy\nfrom db import Db\nfrom dotenv import load_dotenv\nfrom os.path import join, dirname, os\nfrom sqlalchemy.sql import select\n\n\n## load environment values from .env\ndotenv_path = join(dirname(__file__), '../.env')\nload_dotenv(dotenv_path)\n\n## connect to postgres\ndb = Db()\nconnection = db.connect_postgres()\nconn = connection[0]\nmeta = connection[1]\n\nmediSpanTable = meta.tables['medispan_uniquegpi']\ns = select([\n mediSpanTable.c.drug_name,\n]) \\\n.order_by(mediSpanTable.c.drug_name) \\\n.distinct()\n\nDRUG_FILE = os.getenv(\"DRUG_LIST\")\nf = open(DRUG_FILE, \"w\")\n\nresult = conn.execute(s)\nfor drugName in result:\n drug = drugName[0].replace('\"', '')\n f.write('\"' + drug + '\",')\n\nf.close()\n", "sub_path": "pharma/deprecated/create_unique_drug_order_txt.py", "file_name": "create_unique_drug_order_txt.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 15, "usage_type": "call"}, {"api_name": "db.connect_postgres", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.select", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.os", "line_number": 27, 
"usage_type": "name"}]} +{"seq_id": "55878360", "text": "import json\nimport os\nimport sys\nimport pytest\nimport pep8\n\nsys.path.insert(0, os.path.abspath(__file__ + \"/../..\"))\n\nfrom jsane import loads, dumps, JSaneException, from_dict\nfrom jsane.traversable import Traversable\n\n\nclass TestClass:\n @pytest.fixture(autouse=True)\n def create_data(self):\n self.json1 = \"\"\"\n {\n \"r\": \"yo\",\n \"key_1\": \"value_1\",\n \"key_2\": {\n \"key_21\": [\n [2100, 2101],\n [2110, 2111]\n ],\n \"key_22\": [\"l1\", \"l2\"],\n \"key_23\": {\"key_231\":\"v\"},\n \"key_24\": {\n \"key_241\": 502,\n \"key_242\": [\n [5, 0],\n [7, 0]\n ],\n \"key_243\": {\n \"key_2431\": [0, 0],\n \"key_2432\": 504,\n \"key_2433\": [\n [11451, 0],\n [11452, 0]\n ]\n },\n \"key_244\": {\n \"key_2441\": {\n \"key_24411\": {\n \"key_244111\": \"v_24411\",\n \"key_244112\": [[5549, 0]]\n },\n \"key_24412\": \"v_24412\"\n },\n \"key_2442\": [\"ll1\", \"ll2\"]\n }\n }\n }\n }\n \"\"\"\n self.dict1 = {\"foo\": \"bar\"}\n\n def test_wrapper(self):\n assert loads(dumps(self.dict1)).r() == self.dict1\n assert json.dumps(self.dict1) == dumps(self.dict1)\n assert self.dict1[\"foo\"] == from_dict(self.dict1).foo.r()\n assert loads(dumps(self.dict1)), Traversable(self.dict1)\n\n def test_access(self):\n j = loads(self.json1)\n assert j.key_1.r() == \"value_1\"\n assert j[\"r\"].r() == \"yo\"\n assert j.key_2.key_21[1][1].r() == 2111\n\n def test_exception(self):\n j = loads(self.json1)\n with pytest.raises(JSaneException):\n j.key_2.nonexistent[0].r()\n with pytest.raises(JSaneException):\n j.key_2.key_21[7].r()\n with pytest.raises(JSaneException):\n j.key_1.key_2.r()\n with pytest.raises(IndexError):\n j.key_2.key_24.key_244.key_2442[0].r()[7]\n with pytest.raises(JSaneException):\n j.key_2.key_24.key_244.key_2442[0][7].r()\n\n def test_default(self):\n j = loads(self.json1)\n assert j.key_1.key_2.r(None) is None\n assert j.key_2.nonexistent[0].r(\"default\") == \"default\"\n assert j.key_2.key_21[7].r(\"default\") == \"default\"\n with pytest.raises(IndexError):\n j.key_2.key_24.key_244.key_2442[0].r(\"default\")[7]\n\n def test_resolution(self):\n j = loads(self.json1)\n assert j.key_2.key_21[0].r() == [2100, 2101]\n assert j.key_2.key_21[0].r() == [2100, 2101]\n assert j.key_2.key_24.key_244.key_2442[0].r()[0] == \"l\"\n\n def test_pep8(self):\n pep8style = pep8.StyleGuide([['statistics', True],\n ['show-sources', True],\n ['repeat', True],\n ['ignore', \"E501\"],\n ['paths', [os.path.dirname(\n os.path.abspath(__file__))]]],\n parse_argv=False)\n report = pep8style.check_files()\n assert report.total_errors == 0\n", "sub_path": "tests/test_jsane.py", "file_name": "test_jsane.py", "file_ext": "py", "file_size_in_byte": 3381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "jsane.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "jsane.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "jsane.from_dict", "line_number": 60, 
"usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "jsane.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "jsane.traversable.Traversable", "line_number": 61, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 64, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 70, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 71, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 71, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 73, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 73, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 75, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 75, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 79, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 79, "usage_type": "argument"}, {"api_name": "jsane.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 87, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "pep8.StyleGuide", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}]} +{"seq_id": "516059555", "text": "from django.core.urlresolvers import reverse_lazy\nfrom django.db import transaction\nfrom django.views.generic import CreateView, UpdateView, DeleteView, ListView\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom .models import Pazar,Bolum,Reyon,UrunAdi,UrunTipi,UrunOzellikleri\nfrom .forms import PazarForm,BolumForm,ReyonForm,UrunTipiForm,UrunAdiForm,UrunOzellikleriForm\n\n\ndef PazarList(request): \n pazarlar=Pazar.objects.all()\n context={\n 'pazarlar':pazarlar,\n }\n return render(request,'main/pazar_list.html',context)\n\ndef PazarDetail(request,id):\n pazar=get_object_or_404(Pazar,id=id)\n bolum=Bolum.objects.all().filter(pazar_id=id)\n reyon=Reyon.objects.all().filter(pazar_id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'pazar':pazar,\n 'bolum':bolum, \n 'reyon':reyon,\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail.html',context)\n\ndef BolumDetail(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n reyon=Reyon.objects.all().filter(pazar_id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'bolum':bolum, \n 'reyon':reyon,\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-bolum.html',context)\n\ndef ReyonDetail(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 
'reyon':reyon,\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-reyon.html',context)\ndef UrunTipiDetail(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-uruntipi.html',context)\ndef UrunAdiDetail(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'urunadi':urunadi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-urunadi.html',context)\ndef UrunOzellikDetail(request,id):\n urunozellikleri=get_object_or_404(UrunOzellikleri,id=id)\n context = {\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-urunozellikleri.html',context)\n\ndef PazarCreate(request):\n if request.method == \"POST\":\n form = PazarForm(request.POST)\n if form.is_valid():\n pazar = form.save(commit=False)\n pazar.save()\n return redirect('pazar-list')\n else:\n form = PazarForm()\n return render(request, 'main/create.html', {'form': form})\ndef BolumCreate(request,id):\n if request.method == \"POST\":\n form = BolumForm(request.POST)\n if form.is_valid():\n bolum = form.save(commit=False)\n bolum.pazar_id=id\n bolum.save()\n return redirect('pazar-list')\n else:\n form = BolumForm()\n return render(request, 'main/create.html', {'form': form})\ndef ReyonCreate(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n if request.method == \"POST\":\n form = ReyonForm(request.POST)\n if form.is_valid():\n reyon = form.save(commit=False)\n reyon.pazar_id=bolum.pazar_id\n reyon.bolum_id=id\n reyon.save()\n return redirect('pazar-list')\n else:\n form = ReyonForm()\n return render(request, 'main/create.html', {'form': form})\ndef UrunTipiCreate(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n if request.method == \"POST\":\n form = UrunTipiForm(request.POST)\n if form.is_valid():\n uruntipi = form.save(commit=False)\n uruntipi.pazar_id=reyon.pazar_id\n uruntipi.reyon_id=id\n uruntipi.save()\n return redirect('pazar-list')\n else:\n form = UrunTipiForm()\n return render(request, 'main/create.html', {'form': form})\ndef UrunAdiCreate(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id) \n if request.method == \"POST\":\n form = UrunAdiForm(request.POST)\n if form.is_valid():\n urunadi = form.save(commit=False)\n urunadi.pazar_id=uruntipi.pazar_id\n urunadi.urunTipi_id=id\n urunadi.save()\n return redirect('pazar-list')\n else:\n form = UrunAdiForm()\n return render(request, 'main/create.html', {'form': form})\ndef UrunOzellikCreate(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n \n if request.method == \"POST\":\n form = UrunOzellikleriForm(request.POST)\n if form.is_valid():\n urunozellik = form.save(commit=False)\n urunozellik.pazar_id=urunadi.pazar_id\n urunozellik.urunadi_id=id\n urunozellik.stok=5\n urunozellik.save()\n return redirect('pazar-list')\n else:\n form = UrunOzellikleriForm()\n return render(request, 'main/create.html', {'form': form})\n\ndef PazarUpdate(request,id):\n pazar=get_object_or_404(Pazar,id=id)\n bolum=Bolum.objects.all().filter(pazar_id=id)\n reyon=Reyon.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n 
urunoz=UrunOzellikleri.objects.all().filter(pazar_id=id)\n root_id=id\n form=PazarForm(request.POST or None,instance=pazar)\n if form.is_valid():\n pazar=form.save(commit=False)\n pazar.save()\n \n context = {\n 'root_id':root_id,\n 'form':form,\n 'bolum':bolum,\n 'reyon':reyon,\n 'uruntipi':uruntipi,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n\n return render(request,'main/update.html',context)\ndef BolumUpdate(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n reyon=Reyon.objects.all().filter(bolum_id=bolum.id)\n uruntipi=UrunTipi.objects.all().filter(reyon_id=reyon.first().id)\n urunadi=UrunAdi.objects.all().filter(urunTipi_id=uruntipi.first().id)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.first().id)\n\n form=BolumForm(request.POST or None,instance=bolum)\n\n if form.is_valid():\n bolum=form.save(commit=False)\n bolum.save()\n context = {\n 'form':form,\n 'reyon':reyon,\n 'uruntipi':uruntipi,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-bolum.html',context)\ndef ReyonUpdate(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n form=ReyonForm(request.POST or None,instance=reyon)\n uruntipi=UrunTipi.objects.all().filter(reyon_id=reyon.id)\n urunadi=UrunAdi.objects.all().filter(urunTipi_id=uruntipi.first().id)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.first().id)\n\n if form.is_valid():\n reyon=form.save(commit=False)\n reyon.save()\n context = {\n 'form':form,\n 'uruntipi':uruntipi,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-reyon.html',context)\ndef UrunTipiUpdate(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id)\n form=UrunTipiForm(request.POST or None,instance=uruntipi)\n urunadi=UrunAdi.objects.all().filter(urunTipi_id=uruntipi.id)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.first().id)\n\n if form.is_valid():\n uruntipi=form.save(commit=False)\n uruntipi.save()\n context = {\n 'form':form,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-uruntipi.html',context)\ndef UrunAdiUpdate(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n form=UrunAdiForm(request.POST or None,instance=urunadi)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.id)\n\n if form.is_valid():\n urunadi=form.save(commit=False)\n urunadi.save()\n context = {\n 'form':form,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-urunadi.html',context)\ndef UrunOzellikUpdate(request,id):\n urunoz=get_object_or_404(UrunOzellikleri,id=id)\n form=UrunOzellikleriForm(request.POST or None,instance=urunoz)\n if form.is_valid():\n urunoz=form.save(commit=False)\n urunoz.save()\n context = {\n 'form':form,\n }\n return render(request,'main/update-urunozellik.html',context)\n\ndef PazarDelete(request,id):\n pazar=get_object_or_404(Pazar,id=id)\n pazar.delete()\n return redirect(\"pazar-list\")\ndef BolumDelete(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n bolum.delete()\n return redirect(\"pazar-list\")\ndef ReyonDelete(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n reyon.delete()\n return redirect(\"pazar-list\")\ndef UrunTipiDelete(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id)\n uruntipi.delete()\n return redirect(\"pazar-list\")\ndef UrunAdiDelete(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n urunadi.delete()\n return redirect(\"pazar-list\")\ndef UrunOzellikleriDelete(request,id):\n urunoz=get_object_or_404(UrunOzellikleri,id=id)\n urunoz.delete()\n return 
redirect(\"pazar-list\")\n\n\n", "sub_path": "main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "models.Pazar.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Pazar.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Pazar", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Pazar", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.Bolum.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Bolum.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Bolum", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Reyon.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 19, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 20, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 21, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 34, "usage_type": "argument"}, {"api_name": "models.Reyon.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 35, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 36, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 36, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 37, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 49, "usage_type": "argument"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 50, "usage_type": "call"}, 
{"api_name": "models.UrunAdi.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 50, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 51, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 61, "usage_type": "argument"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 62, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 62, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 63, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 63, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 71, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 71, "usage_type": "argument"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 72, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 79, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri", "line_number": 79, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "forms.PazarForm", "line_number": 87, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "forms.PazarForm", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 94, "usage_type": "call"}, {"api_name": "forms.BolumForm", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "forms.BolumForm", "line_number": 104, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 107, "usage_type": "argument"}, {"api_name": "forms.ReyonForm", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 115, "usage_type": "call"}, {"api_name": "forms.ReyonForm", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Reyon", 
"line_number": 120, "usage_type": "argument"}, {"api_name": "forms.UrunTipiForm", "line_number": 122, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 128, "usage_type": "call"}, {"api_name": "forms.UrunTipiForm", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 133, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 133, "usage_type": "argument"}, {"api_name": "forms.UrunAdiForm", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 141, "usage_type": "call"}, {"api_name": "forms.UrunAdiForm", "line_number": 143, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 144, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 146, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 146, "usage_type": "argument"}, {"api_name": "forms.UrunOzellikleriForm", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 156, "usage_type": "call"}, {"api_name": "forms.UrunOzellikleriForm", "line_number": 158, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Pazar", "line_number": 162, "usage_type": "argument"}, {"api_name": "models.Bolum.objects.all", "line_number": 163, "usage_type": "call"}, {"api_name": "models.Bolum.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "models.Bolum", "line_number": 163, "usage_type": "name"}, {"api_name": "models.Reyon.objects.all", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 164, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 165, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 165, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 166, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 166, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 167, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 167, "usage_type": "name"}, {"api_name": "forms.PazarForm", "line_number": 169, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 184, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 186, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 186, "usage_type": "argument"}, {"api_name": "models.Reyon.objects.all", "line_number": 187, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 187, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 188, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 188, "usage_type": "attribute"}, {"api_name": 
"models.UrunTipi", "line_number": 188, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 189, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 189, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 190, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 190, "usage_type": "name"}, {"api_name": "forms.BolumForm", "line_number": 192, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 204, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 206, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 206, "usage_type": "argument"}, {"api_name": "forms.ReyonForm", "line_number": 207, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 208, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 208, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 208, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 209, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 209, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 210, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 210, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 210, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 221, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 223, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 223, "usage_type": "argument"}, {"api_name": "forms.UrunTipiForm", "line_number": 224, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 225, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 225, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 225, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 226, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 226, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 236, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 238, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 238, "usage_type": "argument"}, {"api_name": "forms.UrunAdiForm", "line_number": 239, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 240, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 240, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 240, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 249, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 251, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri", "line_number": 251, "usage_type": "argument"}, {"api_name": "forms.UrunOzellikleriForm", "line_number": 252, "usage_type": "call"}, 
{"api_name": "django.shortcuts.render", "line_number": 259, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 262, "usage_type": "call"}, {"api_name": "models.Pazar", "line_number": 262, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 264, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 266, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 266, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 268, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 270, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 270, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 272, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 274, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 274, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 276, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 278, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 278, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 280, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 282, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri", "line_number": 282, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "513457165", "text": "from aiohttp import web\nfrom functools import partial\nimport asyncio\nimport aiofiles\nimport datetime\nimport os\nimport logging\nimport argparse\n\n\nasync def archivate(delay, path_to_dir, request):\n response = web.StreamResponse()\n\n archive_hash = request.match_info.get('archive_hash')\n\n archive_path = os.path.abspath(f'{path_to_dir}/{archive_hash}')\n\n # Если каталога не сужествует возвращать 404 Not Found \n if not os.path.exists(archive_path):\n raise web.HTTPNotFound()\n\n response.headers['Content-Type'] = 'application/zip'\n response.headers['Content-Disposition'] = f'attachment; filename=\"{archive_hash}.zip\"'\n await response.prepare(request)\n\n cmd = ['zip', '-r', '-', archive_path]\n process = await asyncio.create_subprocess_exec(\n *cmd,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE\n )\n try:\n while True:\n data = await process.stdout.read(100000)\n if not data:\n break\n\n logger.debug( u'Sending archive chunk ...' 
)\n await response.write(data)\n await asyncio.sleep(delay)\n\n except asyncio.CancelledError:\n logger.debug( u'Download was interrupted' )\n process.kill()\n await process.communicate()\n raise\n finally:\n response.force_close()\n\n return response\n\n\nasync def handle_index_page(request):\n async with aiofiles.open('index.html', mode='r') as index_file:\n index_contents = await index_file.read()\n return web.Response(text=index_contents, content_type='text/html')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Microservice for downloading archives.'\n )\n parser.add_argument('-l', '--logger', help='Enable logger',\n action='store_true')\n parser.add_argument('-d', '--delay', default=1, type=int, help='Set delay')\n parser.add_argument('-p', '--path', default='./test_photos', type=str,\n help='Specify the path to directory')\n args = parser.parse_args()\n\n logging.basicConfig(level = logging.DEBUG)\n logger = logging.getLogger('Logger')\n logger.disabled = not args.logger\n\n app = web.Application()\n app.add_routes([\n web.get('/', handle_index_page),\n web.get('/archive/{archive_hash}/', partial(archivate, args.delay, args.path)),\n ])\n web.run_app(app)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "aiohttp.web.StreamResponse", "line_number": 12, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "aiohttp.web.HTTPNotFound", "line_number": 20, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 20, "usage_type": "name"}, {"api_name": "asyncio.create_subprocess_exec", "line_number": 27, "usage_type": "call"}, {"api_name": "asyncio.subprocess", "line_number": 29, "usage_type": "attribute"}, {"api_name": "asyncio.subprocess", "line_number": 30, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 42, "usage_type": "attribute"}, {"api_name": "aiofiles.open", "line_number": 54, "usage_type": "call"}, {"api_name": "aiohttp.web.Response", "line_number": 56, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 56, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 70, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 71, "usage_type": "call"}, {"api_name": "aiohttp.web.Application", "line_number": 74, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 74, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 76, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 76, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 77, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 77, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 77, "usage_type": "call"}, {"api_name": "aiohttp.web.run_app", "line_number": 79, "usage_type": 
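
A minimal sketch of the handler-binding pattern this server.py record demonstrates: functools.partial pre-binds configuration arguments onto an aiohttp handler whose final parameter is the request. The route, handler name, and greeting below are illustrative stand-ins, not values from the source.

from functools import partial
from aiohttp import web

async def greet(greeting, request):
    # 'greeting' is pre-bound via partial; aiohttp supplies 'request' at dispatch
    name = request.match_info.get('name', 'world')
    return web.Response(text=f'{greeting}, {name}!')

app = web.Application()
app.add_routes([web.get('/hello/{name}', partial(greet, 'Hello'))])
# web.run_app(app)  # left commented so the sketch stays import-safe
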
"call"}, {"api_name": "aiohttp.web", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "162086703", "text": "import cv2\n\ncameraCapture = cv2.VideoCapture(0)\nfps = 30 # assumption\nsize = (int(cameraCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(cameraCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\nvideoWriter = cv2.VideoWriter(\n 'img/output2.avi', cv2.VideoWriter_fourcc('I', '4', '2', '0'),\n fps, size\n)\n\nsuccess, frame = cameraCapture.read()\nnumFrameRemaining = 10 * fps - 1\n\nwhile success and numFrameRemaining > 0 :\n videoWriter.write(frame)\n success, frame = cameraCapture.read()\n numFrameRemaining -= 1\n\ncameraCapture.release()", "sub_path": "learning-opencv-computer-vision-book/handling-files-camera-and-gui/basic-io/capturing-camer-frames.py", "file_name": "capturing-camer-frames.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.VideoCapture", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "545286609", "text": "# TENSORFLOW 2.0\nimport glob\nimport pickle\nimport numpy\nfrom music21 import converter, instrument, note, chord\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Bidirectional, LSTM, concatenate, Input\nfrom tensorflow.keras.layers import BatchNormalization as BatchNorm\nimport tensorflow.keras.utils as np_utils\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import Model\n\ndef train_network():\n\t\"\"\" Train a Neural Network to generate music \"\"\"\n\tnotes, offsets, durations = get_notes()\n\n\tn_vocab_notes = len(set(notes))\n\tnetwork_input_notes, network_output_notes = prepare_sequences(notes, n_vocab_notes)\n\t\n\tn_vocab_offsets = len(set(offsets))\n\tnetwork_input_offsets, network_output_offsets = prepare_sequences(offsets, n_vocab_offsets)\n\t\n\tn_vocab_durations = len(set(durations))\n\tnetwork_input_durations, network_output_durations = prepare_sequences(durations, n_vocab_durations)\n\tmodel = create_network(network_input_notes, n_vocab_notes, network_input_offsets, n_vocab_offsets, network_input_durations, n_vocab_durations)\n\ttrain(model, network_input_notes, network_input_offsets, network_input_durations, network_output_notes, network_output_offsets, network_output_durations)\n\ndef get_notes():\n\t\"\"\" Get all the notes and chords from the midi files in the ./midi_songs directory \"\"\"\n\tnotes = []\n\toffsets = []\n\tdurations = []\n\n\tfor file in glob.glob(\"classical-piano-type0/*.mid\"):\n\t\tmidi = converter.parse(file)\n\n\t\tprint(\"Parsing %s\" % file)\n\n\t\tnotes_to_parse = None\n\n\t\ttry: # file has instrument parts\n\t\t\ts2 = instrument.partitionByInstrument(midi)\n\t\t\tnotes_to_parse = s2.parts[0].recurse() \n\t\texcept: # file has notes in a flat structure\n\t\t\tnotes_to_parse = midi.flat.notes\n\t\t\n\t\t\n\t\toffsetBase = 0\n\t\tfor element in notes_to_parse:\n\t\t\tisNoteOrChord = False\n\t\t\t\n\t\t\tif isinstance(element, note.Note):\n\t\t\t\tnotes.append(str(element.pitch))\n\t\t\t\tisNoteOrChord = True\n\t\t\telif 
isinstance(element, chord.Chord):\n\t\t\t\tnotes.append('.'.join(str(n) for n in element.normalOrder))\n\t\t\t\tisNoteOrChord = True\n\t\t\t\n\t\t\tif isNoteOrChord:\n\t\t\t\toffsets.append(str(element.offset - offsetBase))\n\t\t\t\tdurations.append(str(element.duration.quarterLength))\n\t\t\t\tisNoteOrChord = False\n\t\t\t\toffsetBase = element.offset\n\t\t\t\t\n\n\twith open('data/notes', 'wb') as filepath:\n\t\tpickle.dump(notes, filepath)\n\t\n\twith open('data/durations', 'wb') as filepath:\n\t\tpickle.dump(durations, filepath)\n\t\t\n\twith open('data/offsets', 'wb') as filepath:\n\t\tpickle.dump(offsets, filepath)\n\t\n\tprint(durations)\n\treturn notes, offsets, durations\n\ndef prepare_sequences(notes, n_vocab):\n\t\"\"\" Prepare the sequences used by the Neural Network \"\"\"\n\tsequence_length = 100\n\n\t# get all pitch names\n\tpitchnames = sorted(set(item for item in notes))\n\n\t # create a dictionary to map pitches to integers\n\tnote_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n\tnetwork_input = []\n\tnetwork_output = []\n\n\t# create input sequences and the corresponding outputs\n\tfor i in range(0, len(notes) - sequence_length, 1):\n\t\tsequence_in = notes[i:i + sequence_length]\n\t\tsequence_out = notes[i + sequence_length]\n\t\tnetwork_input.append([note_to_int[char] for char in sequence_in])\n\t\tnetwork_output.append(note_to_int[sequence_out])\n\n\tn_patterns = len(network_input)\n\n\t# reshape the input into a format compatible with LSTM layers\n\tnetwork_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))\n\t# normalize input\n\tnetwork_input = network_input / float(n_vocab)\n\n\tnetwork_output = np_utils.to_categorical(network_output)\n\n\treturn (network_input, network_output)\n\ndef create_network(network_input_notes, n_vocab_notes, network_input_offsets, n_vocab_offsets, network_input_durations, n_vocab_durations):\n\t\n\t# Branch of the network that considers notes\n\tinputNotesLayer = Input(shape=(network_input_notes.shape[1], network_input_notes.shape[2]))\n\tinputNotes = LSTM(\n\t\t256,\n\t\tinput_shape=(network_input_notes.shape[1], network_input_notes.shape[2]),\n\t\treturn_sequences=True\n\t)(inputNotesLayer)\n\tinputNotes = Dropout(0.2)(inputNotes)\n\t\n\t# Branch of the network that considers note offset\n\tinputOffsetsLayer = Input(shape=(network_input_offsets.shape[1], network_input_offsets.shape[2]))\n\tinputOffsets = LSTM(\n\t\t256,\n\t\tinput_shape=(network_input_offsets.shape[1], network_input_offsets.shape[2]),\n\t\treturn_sequences=True\n\t)(inputOffsetsLayer)\n\tinputOffsets = Dropout(0.2)(inputOffsets)\n\t\n\t# Branch of the network that considers note duration\n\tinputDurationsLayer = Input(shape=(network_input_durations.shape[1], network_input_durations.shape[2]))\n\tinputDurations = LSTM(\n\t\t256,\n\t\tinput_shape=(network_input_durations.shape[1], network_input_durations.shape[2]),\n\t\treturn_sequences=True\n\t)(inputDurationsLayer)\n\t#inputDurations = Dropout(0.3)(inputDurations)\n\tinputDurations = Dropout(0.2)(inputDurations)\n\t\n\t#Concatentate the three input networks together into one branch now\n\tinputs = concatenate([inputNotes, inputOffsets, inputDurations])\n\t\n\t# A cheeky LSTM to consider everything learnt from the three separate branches\n\tx = LSTM(512, return_sequences=True)(inputs)\n\tx = Dropout(0.3)(x)\n\tx = LSTM(512)(x)\n\tx = BatchNorm()(x)\n\tx = Dropout(0.3)(x)\n\tx = Dense(256, activation='relu')(x)\n\t\n\t#Time to split into three branches again...\n\t\n\t# 
Branch of the network that classifies the note\n\toutputNotes = Dense(128, activation='relu')(x)\n\toutputNotes = BatchNorm()(outputNotes)\n\toutputNotes = Dropout(0.3)(outputNotes)\n\toutputNotes = Dense(n_vocab_notes, activation='softmax', name=\"Note\")(outputNotes)\n\t\n\t# Branch of the network that classifies the note offset\n\toutputOffsets = Dense(128, activation='relu')(x)\n\toutputOffsets = BatchNorm()(outputOffsets)\n\toutputOffsets = Dropout(0.3)(outputOffsets)\n\toutputOffsets = Dense(n_vocab_offsets, activation='softmax', name=\"Offset\")(outputOffsets)\n\t\n\t# Branch of the network that classifies the note duration\n\toutputDurations = Dense(128, activation='relu')(x)\n\toutputDurations = BatchNorm()(outputDurations)\n\toutputDurations = Dropout(0.3)(outputDurations)\n\toutputDurations = Dense(n_vocab_durations, activation='softmax', name=\"Duration\")(outputDurations)\n\t\n\t# Tell Keras what our inputs and outputs are \n\tmodel = Model(inputs=[inputNotesLayer, inputOffsetsLayer, inputDurationsLayer], outputs=[outputNotes, outputOffsets, outputDurations])\n\t\n\t#Adam seems to be faster than RMSProp and learns better too \n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\t# Useful to try RMSProp though\n\t\n\t# LOAD WEIGHTS HERE IF YOU WANT TO CONTINUE TRAINING!\n\t#model.load_weights(weights_name)\n\n\treturn model\n\ndef train(model, network_input_notes, network_input_offsets, network_input_durations, network_output_notes, network_output_offsets, network_output_durations):\n\t\"\"\" train the neural network \"\"\"\n\tfilepath = \"weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5\"\n\tcheckpoint = ModelCheckpoint(\n\t\tfilepath,\n\t\tmonitor='loss',\n\t\tverbose=0,\n\t\tsave_best_only=True,\n\t\tmode='min'\n\t)\n\tcallbacks_list = [checkpoint]\n\n\tmodel.fit([network_input_notes, network_input_offsets, network_input_durations], [network_output_notes, network_output_offsets, network_output_durations], epochs=1000, batch_size=64, callbacks=callbacks_list, verbose=1)\n\nif __name__ == '__main__':\n\t#weights_name = 'weights-improvement-41-0.9199-bigger.hdf5'\n\ttrain_network()\n", "sub_path": "lstm-new-tf2.py", "file_name": "lstm-new-tf2.py", "file_ext": "py", "file_size_in_byte": 7377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "glob.glob", "line_number": 35, "usage_type": "call"}, {"api_name": "music21.converter.parse", "line_number": 36, "usage_type": "call"}, {"api_name": "music21.converter", "line_number": 36, "usage_type": "name"}, {"api_name": "music21.instrument.partitionByInstrument", "line_number": 43, "usage_type": "call"}, {"api_name": "music21.instrument", "line_number": 43, "usage_type": "name"}, {"api_name": "music21.note.Note", "line_number": 53, "usage_type": "attribute"}, {"api_name": "music21.note", "line_number": 53, "usage_type": "name"}, {"api_name": "music21.chord.Chord", "line_number": 56, "usage_type": "attribute"}, {"api_name": "music21.chord", "line_number": 56, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 68, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 71, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 74, "usage_type": "call"}, {"api_name": "music21.note", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 106, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.utils", "line_number": 106, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.concatenate", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.keras.Model", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 186, "usage_type": "call"}]} +{"seq_id": "46852250", "text": "import uuid\nimport time\nimport datetime\nfrom binascii import crc32\nfrom typing import Iterable\nfrom marshmallow import fields\nimport pytz\nfrom werkzeug.datastructures import ImmutableMultiDict\n\n\nclass TimeCased(fields.Field):\n def _serialize(self, value, attr, obj, **kwargs):\n if value is None:\n return \"\"\n shanghai = pytz.timezone(\"Asia/Shanghai\")\n return pytz.utc.localize(value).astimezone(shanghai).strftime('%Y-%m-%d %H:%M:%S')\n\n def _deserialize(self, value, attr, data, **kwargs):\n return value\n\n\ndef create_uuid_and_crc32():\n uuid_ = str(uuid.uuid4())\n uuid_crc32 = crc32(uuid_.encode())\n return uuid_, uuid_crc32\n\n\ndef get_crc32(string):\n return 
crc32(string.encode())\n\n\ndef localtime_to_utctime(localtime: datetime.datetime) -> datetime.datetime:\n timestamp = time.mktime(localtime.timetuple())\n utc_datetime = datetime.datetime.utcfromtimestamp(timestamp)\n return utc_datetime\n\n\ndef utctime_to_localtime(utctime: datetime.datetime) -> datetime.datetime:\n shanghai = pytz.timezone(\"Asia/Shanghai\")\n return pytz.utc.localize(utctime).astimezone(shanghai)\n\n\ndef pagination_slice(list_, page, page_size):\n start = (page - 1) * page_size\n return list_[start: start + page_size]\n\n\ndef drop_duplicates(iterator: Iterable) -> list:\n return list(set(iterator))\n\n\ndef delete_empty_data(data):\n new_args = dict()\n for key, value in data.items():\n if key not in [\"\", None] and value not in [\"\", None]:\n new_args[key] = value\n return ImmutableMultiDict(\n list(\n zip(\n new_args.keys(),\n new_args.values()\n )\n )\n )\n\n\ndef string_to_list(ids: str):\n if ids is not None:\n ids = ids.split(\",\")\n ids = list(map(lambda id_: int(id_), ids))\n return ids\n return None\n\n\ndef expire_time(expire_in):\n shanghai = pytz.timezone(\"Asia/Shanghai\")\n cos_policy_expiration = pytz.utc.localize(datetime.datetime.now().replace(\n microsecond=0) + datetime.timedelta(\n seconds=expire_in)).astimezone(shanghai).strftime('%Y-%m-%dT%H:%M:%S')\n return cos_policy_expiration\n", "sub_path": "python/marshmallow/time_utils.py", "file_name": "time_utils.py", "file_ext": "py", "file_size_in_byte": 2127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "marshmallow.fields.Field", "line_number": 11, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 11, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 15, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 16, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 16, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 23, "usage_type": "call"}, {"api_name": "binascii.crc32", "line_number": 24, "usage_type": "call"}, {"api_name": "binascii.crc32", "line_number": 29, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 34, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 39, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 40, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing.Iterable", "line_number": 48, "usage_type": "name"}, {"api_name": "werkzeug.datastructures.ImmutableMultiDict", "line_number": 57, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 76, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 77, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "601646579", "text": "#!/usr/bin/env python\n# Tsung-Yi Lin \n# Ramakrishna Vedantam \n\nimport copy\nimport json\nimport math\nimport os\n\nimport numpy as np\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\n\n\nPUNCTUATIONS = [\"''\", \"'\", \"``\", \"`\", \"(\", \")\", \"{\", \"}\", \"[\", \"]\", \\\n \".\", \"?\", \"!\", \",\", \":\", \"-\", \"--\", \"...\", \";\"]\n\n\ndef 
term_frequency(sentence, ngrams=4):\n \"\"\"Given a sentence, calculates term frequency of tuples.\n\n Parameters\n ----------\n sentence : str\n Sentence whose term frequency has to be calculated.\n ngrams : int\n Number of n-grams for which term frequency is calculated.\n\n Returns\n -------\n dict\n {tuple : int} key-value pairs representing term frequency.\n \"\"\"\n sentence = sentence.lower().strip()\n for punc in PUNCTUATIONS:\n sentence = sentence.replace(punc, \"\")\n words = TreebankWordTokenizer().tokenize(sentence)\n counts = {}\n for i in range(ngrams):\n for j in range(len(words) - i):\n ngram = tuple(words[j:(j + i + 1)])\n if ngram in counts:\n counts[ngram] += 1\n else:\n counts[ngram] = 1\n return counts\n\n\ndef cook_refs(refs, n=4):\n '''Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.\n :param refs: list of string : reference sentences for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (list of dict)\n '''\n return [term_frequency(ref, n) for ref in refs]\n\n\ndef cook_test(test, n=4):\n '''Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.\n :param test: list of string : hypothesis sentence for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (dict)\n '''\n return term_frequency(test, n)\n\n\nclass CiderScorer(object):\n \"\"\"CIDEr scorer.\"\"\"\n\n def copy(self):\n ''' copy the refs.'''\n new = CiderScorer(n=self.n)\n new.ctest = copy.copy(self.ctest)\n new.crefs = copy.copy(self.crefs)\n return new\n\n def __init__(self, test=None, refs=None, n=4, df_mode=\"coco-val-df\"):\n \"\"\"Singular instance.\"\"\"\n self.n = n\n self.df_mode = df_mode\n self.ctest = []\n self.crefs = []\n self.cook_append(test, refs)\n self.ref_len = None\n self.document_frequency = None\n\n def cook_append(self, test, refs):\n \"\"\"Called by constructor and __iadd__ to avoid creating new instances.\"\"\"\n if refs is not None:\n self.crefs.append(cook_refs(refs))\n if test is not None:\n self.ctest.append(cook_test(test)) ## N.B.: -1\n else:\n self.ctest.append(None) # lens of crefs and ctest have to match\n\n def size(self):\n assert len(self.crefs) == len(self.ctest), \"refs/test mismatch! 
%d<>%d\" % (len(self.crefs), len(self.ctest))\n return len(self.crefs)\n\n def __iadd__(self, other):\n '''add an instance (e.g., from another sentence).'''\n\n if isinstance(other, tuple):\n ## avoid creating new CiderScorer instances\n self.cook_append(other[0], other[1])\n else:\n self.ctest.extend(other.ctest)\n self.crefs.extend(other.crefs)\n return self\n\n def _compute_document_frequency(self):\n '''\n Compute term frequency for reference data.\n This will be used to compute idf (inverse document frequency later)\n The term frequency is stored in the object\n :return: None\n '''\n document_frequency = {}\n if self.df_mode == \"corpus\":\n for refs in self.crefs:\n # refs, k ref captions of one image\n for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]):\n document_frequency[ngram] += 1\n assert(len(self.ctest) >= max(document_frequency.values()))\n elif self.df_mode == \"coco-val-df\":\n document_frequency_temp = json.load(open(os.path.join('data', 'coco_val_df.json')))\n # convert string to tuple\n for key in document_frequency_temp:\n document_frequency[eval(key)] = document_frequency_temp[key]\n else:\n raise ValueError(f\"df_mode can be either corpus or coco-val-df, provided {self.df_mode}!\")\n return document_frequency\n\n def compute_score(self):\n self.document_frequency = self._compute_document_frequency()\n def counts2vec(cnts):\n \"\"\"\n Function maps counts of ngram to vector of tfidf weights.\n The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.\n The n-th entry of array denotes length of n-grams.\n :param cnts:\n :return: vec (array of dict), norm (array of float), length (int)\n \"\"\"\n vec = [{} for _ in range(self.n)]\n length = 0\n norm = [0.0 for _ in range(self.n)]\n for (ngram, term_freq) in cnts.items():\n # give word count 1 if it doesn't appear in reference corpus\n df = np.log(self.document_frequency.get(ngram, 1.0))\n # ngram index\n n = len(ngram) - 1\n # tf (term_freq) * idf (precomputed idf) for n-grams\n vec[n][ngram] = float(term_freq) * (self.ref_len - df)\n # compute norm for the vector. 
the norm will be used for\n # computing similarity\n norm[n] += pow(vec[n][ngram], 2)\n\n if n == 1:\n length += term_freq\n norm = [np.sqrt(n) for n in norm]\n return vec, norm, length\n\n def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):\n '''\n Compute the cosine similarity of two vectors.\n :param vec_hyp: array of dictionary for vector corresponding to hypothesis\n :param vec_ref: array of dictionary for vector corresponding to reference\n :param norm_hyp: array of float for vector corresponding to hypothesis\n :param norm_ref: array of float for vector corresponding to reference\n :param length_hyp: int containing length of hypothesis\n :param length_ref: int containing length of reference\n :return: array of score for each n-grams cosine similarity\n '''\n delta = float(length_hyp - length_ref)\n # measure consine similarity\n val = np.array([0.0 for _ in range(self.n)])\n for n in range(self.n):\n # ngram\n for (ngram,count) in vec_hyp[n].items():\n val[n] += vec_hyp[n].get(ngram, 0) * vec_ref[n].get(ngram, 0)\n\n if (norm_hyp[n] != 0) and (norm_ref[n] != 0):\n val[n] /= (norm_hyp[n]*norm_ref[n])\n\n assert(not math.isnan(val[n]))\n return val\n\n # compute log reference length\n if self.df_mode == \"corpus\":\n self.ref_len = np.log(float(len(self.crefs)))\n elif self.df_mode == \"coco-val-df\":\n # if coco option selected, use length of coco-val set\n self.ref_len = np.log(float(40504))\n\n scores = []\n for test, refs in zip(self.ctest, self.crefs):\n # compute vector for test captions\n vec, norm, length = counts2vec(test)\n # compute vector for ref captions\n score = np.array([0.0 for _ in range(self.n)])\n for ref in refs:\n vec_ref, norm_ref, length_ref = counts2vec(ref)\n score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)\n # change by vrama91 - mean of ngram scores, instead of sum\n score_avg = np.mean(score)\n # divide by number of references\n score_avg /= len(refs)\n # multiply score by 10\n score_avg *= 10.0\n # append score of an image to the score list\n scores.append(score_avg)\n return np.mean(np.array(scores)), np.array(scores)\n", "sub_path": "pyciderevalcap/cider/cider_scorer.py", "file_name": "cider_scorer.py", "file_ext": "py", "file_size_in_byte": 8304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.tokenize.treebank.TreebankWordTokenizer", "line_number": 36, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 75, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 76, "usage_type": "call"}, {"api_name": "json.load", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "216444699", "text": 
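
For intuition about term_frequency in cider_scorer.py, the same n-gram counting can be written standalone; this sketch (the helper name ngram_counts is hypothetical) produces identical tuple counts for sizes 1..max_n.

from collections import Counter

def ngram_counts(words, max_n=2):
    # slide a window of each size n over the token list and count the tuples
    return Counter(tuple(words[j:j + n])
                   for n in range(1, max_n + 1)
                   for j in range(len(words) - n + 1))

print(ngram_counts("the cat sat on the mat".split()))
# e.g. ('the',) -> 2, ('the', 'cat') -> 1, ...
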
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_2d_clf_problem(X, y, h=None):\n '''\n Plots a two-dimensional labeled dataset (X,y) and, if function h(x) is given, \n the decision surfaces.\n '''\n assert X.shape[1] == 2, \"Dataset is not two-dimensional\"\n if h!=None : \n # Create a mesh to plot in\n r = 0.02 # mesh resolution\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, r),\n np.arange(y_min, y_max, r))\n XX=np.c_[xx.ravel(), yy.ravel()]\n try:\n Z_test = h(XX)\n if Z_test.shape == ():\n # h returns a scalar when applied to a matrix; map explicitly\n Z = np.array(map(h,XX))\n else :\n Z = Z_test\n except ValueError:\n # can't apply to a matrix; map explicitly\n Z = np.array(map(h,XX))\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Pastel1)\n\n # Plot the dataset\n plt.scatter(X[:,0],X[:,1], c=y, cmap=plt.cm.Paired, marker='o', s=50);\n", "sub_path": "labosi/lab-2/2015-16/by_unknown/mlutils.py", "file_name": "mlutils.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.meshgrid", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "37699011", "text": "import nltk\nfrom nltk.corpus import gutenberg\n\n\ndef unusual_words(text):\n text_vocab = set(w.lower() for w in text if w.isalpha())\n english_vocab = set(w.lower() for w in nltk.corpus.words.words() )\n unusual = text_vocab.difference(english_vocab)\n return sorted(unusual)\n\n\nlist_of_unusual_words = unusual_words(gutenberg.words('austen-sense.txt'))\nprint(list_of_unusual_words)\n\n\n# stop words\n\nfrom nltk.corpus import stopwords\n\n\nprint(stopwords.words('english'))", "sub_path": "lexical-resources-vocabulary.py", "file_name": "lexical-resources-vocabulary.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.corpus.words.words", "line_number": 7, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 7, "usage_type": "attribute"}, {"api_name": "nltk.corpus.gutenberg.words", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.corpus.gutenberg", "line_number": 12, "usage_type": "name"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "174612595", "text": "from __future__ import print_function\nfrom googleapiclient.discovery 
import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nimport pandas as pd\nimport os\n\nSPREADSHEET_ID = '1VLmc3ztGtdGbx9eByuzozoL7C7KxIqqI5o5sgheA4SQ' # \nSHEET_NAMES = ['Weights', 'C&S', 'Aero', 'Structures', 'FPP', 'Operations', 'Internal']\n\n\nclass GoogleSheetsDataImport(object):\n\n def __init__(self, SPREADSHEET_ID: str = SPREADSHEET_ID, *pages):\n\n self.__sheet_id = SPREADSHEET_ID\n self.__pages = pages\n\n self.__sheets = {}\n self.__import_sheets()\n\n self.__dataframes = {key: self.__sheet_to_dataframe(value) for key, value in self.__sheets.items()}\n\n def get_data(self):\n return self.__dataframes\n\n def __import_sheets(self):\n\n scopes = 'https://www.googleapis.com/auth/spreadsheets.readonly'\n # Setup the Sheets API\n store = file.Storage('/'.join(os.getcwd().split('/')[:-1]) + '/tools/credentials.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('/'.join(os.getcwd().split('/')[:-1]) + '/tools/client_secret.json', scopes)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n # Call the Sheets API\n for page in self.__pages:\n gsheet = service.spreadsheets().values().get(spreadsheetId=self.__sheet_id, range=page).execute()\n self.__sheets[page] = gsheet\n\n @staticmethod\n def __sheet_to_dataframe(gsheet):\n\n def istext(item: str):\n for char in item:\n if 65<= ord(char) <= 90 or 97 <= ord(char) <= 122:\n return True\n else:\n continue\n\n try:\n header = gsheet.get('values', [])[0] # Assumes first line is header!\n\n except IndexError:\n return\n\n values = gsheet.get('values', [])[1:] # Everything else is data.\n\n if not values:\n print('No data found.')\n return pd.Series(header)\n\n else:\n all_data = []\n for col_id, col_name in enumerate(header):\n column_data = []\n for row in values:\n #print(col_id)\n item = row[col_id]\n #print(item)\n if '[' in item:\n item = [float(i) for i in item[1:-1].split(',')]\n\n elif col_name == 'Date' or col_name == 'Notes':\n pass\n\n elif not istext(item):\n item = float(item)\n\n else:\n pass\n\n column_data.append(item)\n\n ds = pd.Series(data=column_data, name=col_name)\n all_data.append(ds)\n df = pd.concat(all_data, axis=1)\n return df.iloc[0]\n\n\nif __name__ == '__main__':\n\n G = GoogleSheetsDataImport(SPREADSHEET_ID, 'Weights', 'C&S')\n data = G.get_data()\n", "sub_path": "src/tools/GoogleSheetsImportMac.py", "file_name": "GoogleSheetsImportMac.py", "file_ext": "py", "file_size_in_byte": 3076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "oauth2client.file.Storage", "line_number": 31, "usage_type": "call"}, {"api_name": "oauth2client.file", "line_number": 31, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 31, "usage_type": "call"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 34, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 34, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 34, "usage_type": "call"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 35, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 35, "usage_type": "name"}, {"api_name": "googleapiclient.discovery.build", "line_number": 36, "usage_type": "call"}, {"api_name": "httplib2.Http", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", 
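
The per-column Series construction in __sheet_to_dataframe of GoogleSheetsImportMac.py amounts to transposing a list of rows; a compact equivalent on made-up header/row data might look like this.

import pandas as pd

header = ['a', 'b']
rows = [['1', 'x'], ['2', 'y']]
# one Series per column, concatenated side by side
df = pd.concat([pd.Series([row[i] for row in rows], name=col)
                for i, col in enumerate(header)], axis=1)
print(df)
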
"line_number": 87, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "5901086", "text": "\"\"\"\n Proximal Policy Optimization (PPO) Algorithm\n\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.utils import shuffle\n\n\nclass PPO(object):\n def __init__(self, s_dim, a_dim, sess, policy_logvar=-1.0,\n lr_a=0.0001, lr_c=0.001, gamma=0.99,\n epsilon=0.2, batch_size=256,\n c_epochs=10, a_epochs=10, clipping_range=0.2):\n self.sess = sess\n\n self.replay_buffer_x = None\n self.replay_buffer_y = None\n self.c_epochs, self.a_epochs = c_epochs, a_epochs\n\n self.s_dim, self.a_dim = s_dim, a_dim\n self.lr_a, self.lr_c = lr_a, lr_c\n self.gamma = gamma\n self.epsilon = epsilon\n self.policy_logvar = policy_logvar\n self.batch_size = batch_size\n\n self.policy_logvar = policy_logvar\n self.clipping_range = clipping_range\n\n self._placeholders()\n self.v = self._build_value_net(self.s_ph, scope='value_function', trainable=True)\n\n # actor\n self.means, self.log_vars = self._build_policy_net(self.s_ph, 'policy', trainable=True)\n self.logp, self.logp_old = self._logprob()\n\n self.sampled_act = self.means + tf.exp(self.log_vars / 2.0) * tf.random_normal(shape=[self.a_dim,])\n\n self.c_loss = tf.reduce_mean(tf.square(self.v - self.val_ph))\n self.c_train_op = tf.train.AdamOptimizer(self.lr_c).minimize(self.c_loss)\n\n # clipped surrogate objective\n pg_ratio = tf.exp(self.logp - self.logp_old)\n clipped_pg_ratio = tf.clip_by_value(pg_ratio, 1 - self.clipping_range, 1 + self.clipping_range)\n surrogate_loss = tf.minimum(self.adv_ph * pg_ratio, self.adv_ph * clipped_pg_ratio)\n self.a_loss = - tf.reduce_mean(surrogate_loss)\n self.a_train_op = tf.train.AdamOptimizer(self.lr_a).minimize(self.a_loss)\n\n self.sess.run(tf.global_variables_initializer())\n self._print_hyperparams()\n\n print('-- INFO: PPO initialized.')\n print('==========================')\n\n def _print_hyperparams(self):\n print('------------------- Hyperparameters ----------------------')\n print('-- S_Dim:', self.s_dim)\n print('-- A_Dim:', self.a_dim)\n print('-- LR_V:', self.lr_c)\n print('-- LR_Actor:', self.lr_a)\n print('-- Gamma:', self.gamma)\n print('-- Batch_size:', self.batch_size)\n print('--')\n\n def _placeholders(self):\n \"\"\" Input placeholders\"\"\"\n # observations, actions and advantages:\n self.s_ph = tf.placeholder(tf.float32, [None, self.s_dim], 'state')\n self.a_ph = tf.placeholder(tf.float32, [None, self.a_dim], 'action')\n self.adv_ph = tf.placeholder(tf.float32, [None, ], 'advantages')\n self.val_ph = tf.placeholder(tf.float32, [None, 1], 'val_valfunc')\n\n self.old_log_vars_ph = tf.placeholder(tf.float32, [self.a_dim, ], 'old_log_vars')\n self.old_means_ph = tf.placeholder(tf.float32, [None, self.a_dim], 'old_means')\n\n def _build_value_net(self, s, scope, trainable):\n with tf.variable_scope(scope):\n fc1 = tf.layers.dense(s, 200, activation=tf.nn.relu, name='fc1', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / self.s_dim)))\n fc2 = tf.layers.dense(fc1, 100, activation=tf.nn.relu, name='fc2', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 200)))\n v = tf.layers.dense(fc2, 1, activation=None, name='v_value', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 100)))\n return v\n\n def _build_policy_net(self, s, scope, trainable):\n with tf.variable_scope(scope):\n fc1 = tf.layers.dense(s, 200, 
activation=tf.nn.relu, name='fc1', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / self.s_dim)))\n fc2 = tf.layers.dense(fc1, 100, activation=tf.nn.relu, name='fc2', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 200)))\n means = tf.layers.dense(fc2, self.a_dim, activation=tf.nn.tanh, name='means', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 100)))\n logvar_speed = (10 * 64) // 48\n spd_log_vars = tf.get_variable('spd_logvars', [logvar_speed, self.a_dim], tf.float32,\n tf.constant_initializer(0.0))\n log_vars = tf.reduce_sum(spd_log_vars, axis=0) + self.policy_logvar\n\n return means, log_vars\n\n def _logprob(self):\n \"\"\" Calculate log probabilities of a batch of observations & actions\n\n Calculates log probabilities using previous step's model parameters and\n new parameters being trained.\n \"\"\"\n logp = -0.5 * tf.reduce_sum(self.log_vars)\n logp += -0.5 * tf.reduce_sum(tf.square(self.a_ph - self.means) /\n tf.exp(self.log_vars), axis=1)\n\n logp_old = -0.5 * tf.reduce_sum(self.old_log_vars_ph)\n logp_old += -0.5 * tf.reduce_sum(tf.square(self.a_ph - self.old_means_ph) /\n tf.exp(self.old_log_vars_ph), axis=1)\n\n return logp, logp_old\n\n def choose_action(self, s):\n s = s[np.newaxis, :]\n a = self.sess.run(self.sampled_act, feed_dict={self.s_ph: s})[0]\n return np.clip(a, -1, 1)\n\n def predict_v(self, s):\n \"\"\" Predict method \"\"\"\n y_hat = self.sess.run(self.v, feed_dict={self.s_ph: s})\n\n return np.squeeze(y_hat)\n\n def update_p(self, observes, actions, advantages):\n \"\"\" Update policy based on observations, actions and advantages\n\n Args:\n observes: observations, shape = (N, obs_dim)\n actions: actions, shape = (N, act_dim)\n advantages: advantages, shape = (N,)\n \"\"\"\n feed_dict = {self.s_ph: observes,\n self.a_ph: actions,\n self.adv_ph: advantages,\n }\n\n old_means_np, old_log_vars_np = self.sess.run([self.means, self.log_vars], feed_dict)\n feed_dict[self.old_log_vars_ph] = old_log_vars_np\n feed_dict[self.old_means_ph] = old_means_np\n\n a_loss = 0\n for e in range(self.a_epochs):\n # TODO: need to improve data pipeline - re-feeding data every epoch\n self.sess.run(self.a_train_op, feed_dict)\n a_loss = self.sess.run(self.a_loss, feed_dict)\n\n return a_loss\n\n def update_v(self, x, y):\n \"\"\" Fit model to current data batch + previous data batch\n\n Args:\n x: features\n y: target\n logger: logger to save training loss and % explained variance\n \"\"\"\n num_batches = max(x.shape[0] // self.batch_size, 1)\n batch_size = x.shape[0] // num_batches\n\n if self.replay_buffer_x is None:\n x_train, y_train = x, y\n else:\n x_train = np.concatenate([x, self.replay_buffer_x])\n y_train = np.concatenate([y, self.replay_buffer_y])\n self.replay_buffer_x = x\n self.replay_buffer_y = y\n\n for e in range(self.c_epochs):\n x_train, y_train = shuffle(x_train, y_train)\n for j in range(num_batches):\n start = j * batch_size\n end = (j + 1) * batch_size\n feed_dict = {self.s_ph: x_train[start:end, :],\n self.val_ph: y_train[start:end].reshape(-1, 1)}\n _, l = self.sess.run([self.c_train_op, self.c_loss], feed_dict=feed_dict)\n\n y_hat = self.predict_v(x)\n c_loss = np.mean(np.square(y_hat - y)) # explained variance after update\n\n return c_loss\n", "sub_path": "networks/ppo.py", "file_name": "ppo.py", "file_ext": "py", "file_size_in_byte": 7946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "57", "api": [{"api_name": "tensorflow.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.exp", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 90, "usage_type": "attribute"}, {"api_name": 
"tensorflow.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.constant_initializer", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "301660674", "text": "#! 
/usr/local/bin/python3\n\nimport sys\nfrom skimage import io\nfrom matplotlib import pyplot as plt\n\nFILE_NAME = sys.argv[1]\nTARGET_FILE = \"textrender\"\n\nimg = io.imread(FILE_NAME)\n\nfig = plt.figure()\nplt.imshow(img)\nplt.text(400,400, \"hello\", color=\"white\", fontsize=5)\nfig.savefig(TARGET_FILE+\".pdf\", dpi=600, bbox_inches=\"tight\")\n", "sub_path": "render_text.py", "file_name": "render_text.py", "file_ext": "py", "file_size_in_byte": 331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "skimage.io.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "180772050", "text": "from setuptools import setup\nfrom codecs import open\nfrom os import path\n\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='images2gif',\n version='1.0.0',\n description='Python 3 compatible images2gif.py',\n long_description=long_description,\n url='https://github.com/isaacgerg/images2gif',\n author='Almar Klein, Ant1, Marius van Voorden',\n license='BSD',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 3.5',\n ],\n py_modules=['images2gif'],\n install_requires=['numpy>=1.11.1', 'Pillow>=3.3.1', 'scipy>=0.18.0'],\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "618730489", "text": "from torchvision import datasets, transforms\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom Facenet.load_dataset import load_dataset\r\nfrom Model.MobileFacenet import MobileFaceNet\r\n\r\nimport torch.nn.functional\r\nimport torch\r\nimport pickle\r\nimport math\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef create_svm():\r\n model = MobileFaceNet(512).to(torch.device(\"cuda:0\"))\r\n model.load_state_dict(torch.load('../PretrainedModel/model.pth'))\r\n\r\n dataset = load_dataset('../Dataset/Processed/')\r\n images = []\r\n labels = []\r\n\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])\r\n\r\n for class_name in 
dataset:\r\n for path in class_name.paths:\r\n img = cv2.imread(path)\r\n img = cv2.resize(img,(112,112))\r\n img = transform(img)\r\n img = img.type(torch.FloatTensor)\r\n images.append(img)\r\n labels.append(class_name.name)\r\n\r\n img_batch = torch.utils.data.DataLoader(images, batch_size=32)\r\n labels = np.array(labels)\r\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)\r\n\r\n #---------------------CREATE EMBEDDING AND LABEL-----------------------------------------\r\n labels_encoder = LabelEncoder().fit(labels)\r\n labelsNum = labels_encoder.transform(labels)\r\n nClasses = len(labels_encoder.classes_)\r\n nrof_img = len(labelsNum)\r\n emb = np.zeros((nrof_img,512))\r\n idx = 0\r\n\r\n model.eval()\r\n\r\n for batch in iter(img_batch):\r\n with torch.no_grad():\r\n batch = batch.to(torch.device(\"cuda:0\"))\r\n embedding = model(batch).cpu()\r\n emb[idx:idx+32,:] = embedding\r\n idx += 32\r\n\r\n clf = SVC(C=1, kernel='linear', probability=True)\r\n clf.fit(emb,labelsNum)\r\n\r\n fname = '../PretrainedModel/classifier.pkl'\r\n with open(fname, 'wb') as f:\r\n pickle.dump((labels_encoder, clf), f)\r\n", "sub_path": "create_svm.py", "file_name": "create_svm.py", "file_ext": "py", "file_size_in_byte": 1870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "Model.MobileFacenet.MobileFaceNet", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 17, "usage_type": "call"}, {"api_name": "Facenet.load_dataset.load_dataset", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 55, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "465295199", "text": "import tensorflow as tf\nfrom nets.inception_v3 import inception_v3, inception_v3_arg_scope\nfrom tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step\nfrom tensorflow.python.platform import tf_logging as logging\nfrom preprocessing import preprocessing_factory\nfrom sklearn.metrics import confusion_matrix\nimport os\nimport time\n\nimport numpy as np\n\nslim = 
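
Downstream of the embedding extraction in create_svm.py, the classifier stage is ordinary scikit-learn on a 2-D feature matrix; this self-contained sketch substitutes random 512-dimensional vectors and hypothetical identity labels for real face embeddings.

import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC

rng = np.random.default_rng(0)
emb = rng.normal(size=(20, 512))          # stand-in for face embeddings
labels = np.array(['alice', 'bob'] * 10)  # hypothetical identities

le = LabelEncoder().fit(labels)
clf = SVC(C=1, kernel='linear', probability=True).fit(emb, le.transform(labels))
print(le.inverse_transform(clf.predict(emb[:2])))
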
tf.contrib.slim\n\nlog_dir = 'tmp/log/both_with_cp'\nlog_eval = 'tmp/log_eval_test/both_with_cp'\n\n# two dataset directories\ndataset_dir_rgb = 'tmp/dataset_parallel_trf/rgb'\ndataset_dir_depth = 'tmp/dataset_parallel_trf/depth'\n# correspondingly two labels files\nlabels_file_rgb = 'tmp/dataset_parallel_trf/rgb/labels.txt'\nlabels_file_depth = 'tmp/dataset_parallel_trf/depth/labels.txt'\n\nbatch_size = 10\n\nnum_epochs = 1\n\nnum_classes = 5\n\nlabels = open(labels_file_rgb, 'r')\nlabels_to_name = {}\nfor line in labels:\n label, string_name = line.split(':')\n string_name = string_name[:-1]\n labels_to_name[int(label)] = string_name\n\nfile_pattern = 'objects_%s_*.tfrecord'\n\nitems_to_descriptions = {\n 'image': 'A 3-channel RGB coloured product image',\n 'label': 'A label that from 4 labels'\n}\n\ncheckpoint_file = tf.train.latest_checkpoint(log_dir)\n\ndef get_split(split_name, dataset_dir, file_pattern=file_pattern, file_pattern_for_counting='objects'):\n if split_name not in ['train', 'validation']:\n raise ValueError(\n 'The split_name %s is not recognized. Please input either train or validation as the split_name' % (\n split_name))\n\n file_pattern_path = os.path.join(dataset_dir, file_pattern % (split_name))\n\n num_samples = 0\n file_pattern_for_counting = 'objects' + '_' + split_name\n tfrecords_to_count = [os.path.join(dataset_dir, file) for file in os.listdir(dataset_dir) if\n file.startswith(file_pattern_for_counting)]\n for tfrecord_file in tfrecords_to_count:\n for record in tf.python_io.tf_record_iterator(tfrecord_file):\n num_samples += 1\n\n test = num_samples\n\n reader = tf.TFRecordReader\n\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),\n 'image/class/label': tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n }\n\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image(),\n 'label': slim.tfexample_decoder.Tensor('image/class/label'),\n }\n\n decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\n\n labels_to_name_dict = labels_to_name\n\n dataset = slim.dataset.Dataset(\n data_sources=file_pattern_path,\n decoder=decoder,\n reader=reader,\n num_readers=4,\n num_samples=num_samples,\n num_classes=num_classes,\n labels_to_name=labels_to_name_dict,\n items_to_descriptions=items_to_descriptions)\n\n return dataset\n\ndef load_batch(dataset, batch_size, is_training=True):\n '''\n Loads a batch for training.\n\n INPUTS:\n - dataset(Dataset): a Dataset class object that is created from the get_split function\n - batch_size(int): determines how big of a batch to train\n - height(int): the height of the image to resize to during preprocessing\n - width(int): the width of the image to resize to during preprocessing\n - is_training(bool): to determine whether to perform a training or evaluation preprocessing\n\n OUTPUTS:\n - images(Tensor): a Tensor of the shape (batch_size, height, width, channels) that contain one batch of images\n - labels(Tensor): the batch's labels with the shape (batch_size,) (requires one_hot_encoding).\n\n '''\n # First create the data_provider object\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n common_queue_capacity=24 + 3 * batch_size,\n common_queue_min=24,\n shuffle=False)\n\n # # Obtain the raw image using the get method\n image, label = data_provider.get(['image', 'label'])\n\n # # Perform the correct preprocessing for 
this image depending if it is training or evaluating\n image_preprocessing_fn = preprocessing_factory.get_preprocessing('inception_v3',is_training=False)\n\n train_image_size = 256\n image = image_preprocessing_fn(image, train_image_size, train_image_size)\n\n # # Batch up the image by enqueing the tensors internally in a FIFO queue and dequeueing many elements with tf.train.batch.\n images, labels = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=4,\n capacity=5 * batch_size)\n\n return images, labels\n\n\ndef run():\n end_points = {}\n if not os.path.exists(log_eval):\n os.mkdir(log_eval)\n with tf.Graph().as_default() as graph:\n tf.logging.set_verbosity(tf.logging.INFO) # Set the verbosity to INFO level\n \n ########################################################\n # Get RGB dataset and the Imagenet trained on RGB images\n ########################################################\n \n # First create the dataset and load one batch\n dataset_rgb = get_split('validation', dataset_dir_rgb, file_pattern=file_pattern)\n images_rgb, labels_rgb = load_batch(dataset_rgb, batch_size=batch_size)\n\n # Know the number steps to take before decaying the learning rate and batches per epoch\n num_batches_per_epoch = int(dataset_rgb.num_samples / batch_size)\n num_steps_per_epoch = num_batches_per_epoch # Because one step is one batch processed\n\n with tf.variable_scope(\"net_rgb\"):\n # Create the model inference\n with slim.arg_scope(inception_v3_arg_scope()):\n logits_rgb, end_points_rgb = inception_v3(images_rgb, num_classes=dataset_rgb.num_classes, is_training=True)\n\n \n ########################################################\n # Get depth dataset and the Imagenet trained on depth images\n ########################################################\n \n # First create the dataset and load one batch\n dataset_depth = get_split('validation', dataset_dir_depth, file_pattern=file_pattern)\n images_depth, labels_depth = load_batch(dataset_depth, batch_size=batch_size)\n\n # Create the model inference\n with tf.variable_scope(\"net_depth\"):\n with slim.arg_scope(inception_v3_arg_scope()):\n logits_depth, end_points_depth = inception_v3(images_depth, num_classes=dataset_rgb.num_classes, is_training=True)\n\n ########################################################\n # Combine the models with the concatenation operation\n # and add an FC layer on top\n ########################################################\n \n # \n with tf.variable_scope(\"concat_dense\"): \n W_master = tf.Variable(tf.random_uniform([10, 5], -0.01, 0.01), name = \"weights_concat\")\n b_master = tf.Variable(tf.zeros([5]), name = \"bias_concat\")\n \n h_master = tf.matmul(tf.concat((logits_rgb, logits_depth), axis=1), W_master) + b_master\n \n logits2 = tf.layers.dense(inputs=h_master, units=(num_classes * 2), name=\"dense_concat1\")\n \n logits = tf.layers.dense(inputs=logits2, units=num_classes, name=\"dense_concat0\")\n \n end_points['Logits'] = logits\n end_points['Predictions'] = slim.softmax(logits, scope='Predictions')\n \n variables_to_restore = slim.get_variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n def restore_fn(sess):\n return saver.restore(sess, checkpoint_file)\n \n ####################################################\n # EVALUATION\n ####################################################\n\n predictions = tf.argmax(end_points['Predictions'], 1)\n accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels_rgb)\n metrics_op = 
tf.group(accuracy_update)\n\n global_step = get_or_create_global_step()\n global_step_op = tf.assign(global_step, global_step + 1)\n \n conf_m = np.zeros((5, 5))\n\n def eval_step(sess, metrics_op, global_step, confusion_m):\n '''\n Simply takes in a session, runs the metrics op and some logging information.\n '''\n \n start_time = time.time()\n _, global_step_count, accuracy_value = sess.run([metrics_op, global_step_op, accuracy])\n time_elapsed = time.time() - start_time\n \n images_rgb_im, images_depth_im, labels, prediction = sess.run([images_rgb, images_depth, labels_rgb, predictions])\n \n confusion_m += confusion_matrix(labels, prediction, labels = [0, 1, 2, 3, 4])\n\n logging.info('Global Step %s: Streaming Accuracy: %.4f (%.2f sec/step)', global_step_count, accuracy_value,\n time_elapsed)\n\n return accuracy_value\n\n tf.summary.scalar('Validation_Accuracy', accuracy)\n my_summary_op = tf.summary.merge_all()\n\n sv = tf.train.Supervisor(logdir=log_eval, summary_op=None, saver=None, init_fn=restore_fn)\n\n with sv.managed_session() as sess:\n num_steps_per_epoch = int(num_steps_per_epoch)\n for step in range(num_steps_per_epoch * num_epochs):\n sess.run(sv.global_step)\n if step % num_batches_per_epoch == 0:\n logging.info('Epoch: %s/%s', step / num_batches_per_epoch + 1, num_epochs)\n logging.info('Current Streaming Accuracy: %.4f', sess.run(accuracy))\n\n if step % 10 == 0:\n eval_step(sess, metrics_op=metrics_op, global_step=sv.global_step, confusion_m = conf_m)\n summaries = sess.run(my_summary_op)\n sv.summary_computed(sess, summaries)\n\n\n else:\n eval_step(sess, metrics_op=metrics_op, global_step=sv.global_step,confusion_m = conf_m)\n\n logging.info('Final Streaming Accuracy: %.4f', sess.run(accuracy))\n \n images_rgb, images_depth, labels, predictions = sess.run([images_rgb, images_depth, labels_rgb, predictions])\n \n print (sess.run(end_points['Predictions']))\n \n print (conf_m)\n \n \n for i in range(10):\n label, prediction = labels[i], predictions[i]\n prediction_name, label_name = dataset_rgb.labels_to_name[prediction], dataset_rgb.labels_to_name[label]\n text = 'Prediction: %s \\n Ground Truth: %s' % (prediction_name, label_name)\n print(text)\n logging.info(\n 'Model evaluation has completed! 
Visit TensorBoard for more information regarding your evaluation.')\n \nif __name__ == '__main__':\n run()", "sub_path": "eval_both.py", "file_name": "eval_both.py", "file_ext": "py", "file_size_in_byte": 11069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.contrib", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.python_io.tf_record_iterator", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.python_io", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.TFRecordReader", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "preprocessing.preprocessing_factory.get_preprocessing", "line_number": 121, "usage_type": "call"}, {"api_name": "preprocessing.preprocessing_factory", "line_number": 121, "usage_type": "name"}, {"api_name": "tensorflow.train.batch", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.logging.set_verbosity", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 155, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3_arg_scope", "line_number": 157, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 170, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3_arg_scope", "line_number": 171, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 181, "usage_type": "call"}, {"api_name": "tensorflow.random_uniform", "line_number": 181, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 182, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 182, "usage_type": 
"call"}, {"api_name": "tensorflow.matmul", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 186, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 194, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.contrib.metrics.streaming_accuracy", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tensorflow.group", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 210, "usage_type": "call"}, {"api_name": "time.time", "line_number": 217, "usage_type": "call"}, {"api_name": "time.time", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 223, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 225, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 225, "usage_type": "name"}, {"api_name": "tensorflow.summary.scalar", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 230, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 231, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Supervisor", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 233, "usage_type": "attribute"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 240, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 240, "usage_type": "name"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 241, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 241, "usage_type": "name"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 252, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 252, "usage_type": "name"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 266, "usage_type": "name"}]} +{"seq_id": "438107253", "text": "# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\n\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_desktopvirtualization.generated._client_factory import cf_workspace\n desktopvirtualization_workspace = CliCommandType(\n operations_tmpl='azext_desktopvirtualization.vendored_sdks.desktopvirtualization.operations._workspace_operatio'\n 'ns#WorkspaceOperations.{}',\n client_factory=cf_workspace)\n with self.command_group('desktopvirtualization workspace', desktopvirtualization_workspace,\n client_factory=cf_workspace, is_experimental=True) as g:\n g.custom_command('list', 'desktopvirtualization_workspace_list')\n g.custom_show_command('show', 'desktopvirtualization_workspace_show')\n g.custom_command('create', 'desktopvirtualization_workspace_create')\n g.custom_command('update', 'desktopvirtualization_workspace_update')\n g.custom_command('delete', 'desktopvirtualization_workspace_delete')\n\n from azext_desktopvirtualization.generated._client_factory import cf_application_group\n desktopvirtualization_application_group = CliCommandType(\n operations_tmpl='azext_desktopvirtualization.vendored_sdks.desktopvirtualization.operations._application_group_'\n 'operations#ApplicationGroupOperations.{}',\n client_factory=cf_application_group)\n with self.command_group('desktopvirtualization applicationgroup', desktopvirtualization_application_group,\n client_factory=cf_application_group, is_experimental=True) as g:\n g.custom_command('list', 'desktopvirtualization_applicationgroup_list')\n g.custom_show_command('show', 'desktopvirtualization_applicationgroup_show')\n g.custom_command('create', 'desktopvirtualization_applicationgroup_create')\n g.custom_command('update', 'desktopvirtualization_applicationgroup_update')\n g.custom_command('delete', 'desktopvirtualization_applicationgroup_delete')\n\n from azext_desktopvirtualization.generated._client_factory import cf_host_pool\n desktopvirtualization_host_pool = CliCommandType(\n operations_tmpl='azext_desktopvirtualization.vendored_sdks.desktopvirtualization.operations._host_pool_operatio'\n 'ns#HostPoolOperations.{}',\n client_factory=cf_host_pool)\n with self.command_group('desktopvirtualization hostpool', desktopvirtualization_host_pool,\n client_factory=cf_host_pool, is_experimental=True) as g:\n g.custom_command('list', 'desktopvirtualization_hostpool_list')\n g.custom_show_command('show', 'desktopvirtualization_hostpool_show')\n g.custom_command('create', 'desktopvirtualization_hostpool_create')\n g.custom_command('update', 'desktopvirtualization_hostpool_update')\n g.custom_command('delete', 'desktopvirtualization_hostpool_delete')\n", "sub_path": "src/desktopvirtualization/azext_desktopvirtualization/generated/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 3395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "azure.cli.core.commands.CliCommandType", "line_number": 19, "usage_type": "call"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_workspace", "line_number": 22, "usage_type": "name"}, {"api_name": 
"azext_desktopvirtualization.generated._client_factory.cf_workspace", "line_number": 24, "usage_type": "name"}, {"api_name": "azure.cli.core.commands.CliCommandType", "line_number": 32, "usage_type": "call"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_application_group", "line_number": 35, "usage_type": "name"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_application_group", "line_number": 37, "usage_type": "name"}, {"api_name": "azure.cli.core.commands.CliCommandType", "line_number": 45, "usage_type": "call"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_host_pool", "line_number": 48, "usage_type": "name"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_host_pool", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "547131191", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport json\nimport time\nimport requests\nimport random\nfrom dotenv import load_dotenv\nfrom loguru import logger\nfrom slackclient import SlackClient\n\nload_dotenv()\n\nSLCKBTD = None\nSLCKCLNT = SlackClient(os.getenv(\"SLACK_BOT_TOKEN\"))\n\n#[ Magic Declarations ]#\nRTM_READ_DELAY = 1\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n#[ End magic declarations ]#\n\n#------ Define bot functions here ------------------------------------------\ndef say_hello(command, event):\n return f\"Hello {event['user']} :snake:\"\n \ndef get_kitty(command, event):\n r = requests.get(f\"https://community-placekitten.p.rapidapi.com/{random.randint(100, 600)}/{random.randint(100, 600)}\", headers={\"X-RapidAPI-Key\": os.getenv(\"X-RapidAPI-Key\")})\n open('kitty.png', 'wb').write(r.content)\n SLCKCLNT.api_call(\n 'files.upload', \n channels=event['channel'], \n as_user=True, \n filename='pic.jpg', \n file=open('kitty.png', 'rb'),\n )\n return False\n\ndef get_norris(command, event):\n r = requests.get(\"https://matchilling-chuck-norris-jokes-v1.p.rapidapi.com/jokes/random\", headers={\"X-RapidAPI-Key\": os.getenv(\"X-RapidAPI-Key\")})\n j = json.loads(r.text)\n return j[\"value\"] + \" :tada:\"\n\ndef get_insult(command, event):\n r = requests.get(f\"https://insult.mattbas.org/api/insult.txt?who={event['user']}\")\n return r.text\n#------ Add definitions to CMDS dict ---------------------------------------\n\nCMMDS = {\n # Key: func\n \"hi\": say_hello,\n \"kitten\": get_kitty,\n \"norris\": get_norris,\n \"insult\": get_insult\n}\n\ndef humanizeChannel(channel):\n return \"#{}\".format(\n SLCKCLNT.api_call(\n \"channels.info\", \n channel=channel\n ).get(\n 'channel', \n {}\n ).get('name'))\n\n\ndef humanizeUser(user):\n return SLCKCLNT.api_call(\n \"users.info\", \n user=user).get(\n 'user', \n {}\n ).get('name')\n\n\ndef parse_incoming(sevent):\n for event in sevent:\n if 'user' in event:\n event['user'] = humanizeUser(event['user'])\n\n if 'channel' in event:\n event['channel'] = humanizeChannel(event['channel'])\n\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n\n user_id, message = matchDirect(event[\"text\"])\n if user_id == SLCKBTD:\n logger.info(f\"Message Recieved in {event['channel']}: {message}\")\n return message, event\n # if 'subtype' in event and event['subtype'] != \"bot_message\":\n logger.debug(event)\n return None, None\n\ndef matchDirect(msg):\n r = re.search(MENTION_REGEX, msg)\n return (r.group(1), r.group(2).strip()) if r else (None, None)\n\ndef handle_command(command, event):\n comm = command.split(\" \")\n response = None\n\n if comm[0] in 
CMMDS:\n response = CMMDS[comm[0]](command, event)\n \n if response != False and response != None:\n logger_response = response.replace('\\n', ' ')[:20]\n logger.info(f\"Response: {logger_response}...\") \n SLCKCLNT.api_call(\n \"chat.postMessage\",\n channel=event['channel'],\n text=response or \"What was that? :: Try: \" + \", \".join([x for x in CMMDS.keys()])\n )\n\n if response == None:\n SLCKCLNT.api_call(\n \"chat.postMessage\",\n channel=event['channel'],\n text=\"What was that? :: Try: \" + \", \".join([x for x in CMMDS.keys()])\n )\n\nif __name__ == \"__main__\":\n if SLCKCLNT.rtm_connect(with_team_state=False):\n SLCKBTD = SLCKCLNT.api_call(\"auth.test\")[\"user_id\"]\n logger.info(f\"Bot connected {SLCKBTD}\")\n\n while True:\n command, channel = parse_incoming(SLCKCLNT.rtm_read())\n if command:\n handle_command(command, channel)\n time.sleep(RTM_READ_DELAY)\n else:\n logger.exception(\"Connection Failed\")\n \n", "sub_path": "jbot.py", "file_name": "jbot.py", "file_ext": "py", "file_size_in_byte": 3973, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 15, "usage_type": "call"}, {"api_name": "slackclient.SlackClient", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 91, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 91, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 94, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 94, "usage_type": "name"}, {"api_name": "re.search", "line_number": 98, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 110, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 110, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 127, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 127, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}, {"api_name": "loguru.logger.exception", "line_number": 135, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "221117320", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#!/usr/bin/python2.5\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom google.appengine.ext import db\nfrom google.appengine.datastore import entity_pb\n\n\ndef to_binary(data):\n \"\"\" compresses 
entities or lists of entities for caching.\n \n Args: \n data - arbitrary data input, on its way to memcache\n \"\"\"\n if isinstance(data, db.Model):\n # Just one instance\n return makeProtoBufObj(data)\n # if none of the first 5 items are models, don't look for entities\n elif isinstance(data, list) and any(isinstance(x, db.Model) for x in data):\n # list of entities\n entities = []\n for obj in data:\n # if item is entity, convert it.\n if isinstance(obj, db.Model):\n protobuf_obj = makeProtoBufObj(obj)\n entities.append(protobuf_obj)\n else:\n entities.append(obj)\n buffered_list = ProtoBufList(entities)\n return buffered_list\n else: # return data as is \n return data\n\n\ndef from_binary(data):\n \"\"\" decompresses entities or lists from cache.\n \n Args: \n data - arbitrary data input from memcache\n \"\"\"\n if isinstance(data, ProtoBufObj):\n # Just one instance\n return db.model_from_protobuf(entity_pb.EntityProto(data.val))\n elif isinstance(data, ProtoBufList):\n entities = []\n for obj in data.vals:\n # if item is entity, convert it.\n if isinstance(obj, ProtoBufObj):\n model_class = obj.model_class\n entities.append(db.model_from_protobuf(entity_pb.EntityProto(obj.val)))\n else:\n entities.append(obj)\n return entities\n else: # return data as is\n return data\n\nclass ProtoBufObj():\n \"\"\" special type used to identify protobuf objects \"\"\"\n def __init__(self, val, model_class): \n self.val = val\n self.model_class = model_class\n # model class makes it unnecessary to import model classes\n \nclass ProtoBufList():\n \"\"\" special type used to identify list containing protobuf objects \"\"\"\n def __init__(self, vals):\n self.vals = vals\n\ndef makeProtoBufObj(obj):\n val = db.model_to_protobuf(obj).Encode()\n model_class = db.class_for_kind(obj.kind())\n return ProtoBufObj(val, model_class) \n", "sub_path": "util/cache_compress.py", "file_name": "cache_compress.py", "file_ext": "py", "file_size_in_byte": 2863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "google.appengine.ext.db.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 26, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Model", "line_number": 30, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 30, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Model", "line_number": 35, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 35, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.model_from_protobuf", "line_number": 54, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 54, "usage_type": "name"}, {"api_name": "google.appengine.datastore.entity_pb.EntityProto", "line_number": 54, "usage_type": "call"}, {"api_name": "google.appengine.datastore.entity_pb", "line_number": 54, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.model_from_protobuf", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 61, "usage_type": "name"}, {"api_name": "google.appengine.datastore.entity_pb.EntityProto", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.datastore.entity_pb", "line_number": 61, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.model_to_protobuf", "line_number": 81, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 81, "usage_type": "name"}, 
{"api_name": "google.appengine.ext.db.class_for_kind", "line_number": 82, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "596670846", "text": "import mongodb\nimport config\nimport telebot\nfrom telebot import types\n\n\nbot = telebot.TeleBot(config.token)\n\n\n@bot.message_handler(commands=['feedback'])\ndef send_contacts(message):\n bot.send_message(message.chat.id, 'Будем оставаться на связи!\\n')\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_phone = types.KeyboardButton(text=\"Отправить номер телефона\", request_contact=True)\n keyboard.add(button_phone)\n bot.send_message(message.chat.id,\n 'Добро пожаловать в BotsApp \\U0001F60A \\n'\n 'Для авторизации нажмите кнопку «Отправить мой номер».',\n reply_markup=keyboard)\n\n\n@bot.message_handler(content_types='contact')\n@bot.message_handler(func=lambda message: message.text == 'Основное меню', content_types=['text'])\ndef main_menu(message):\n mongodb.MongoAdd().inserting(message.contact)\n keyboard = types.ReplyKeyboardMarkup()\n big_button = types.KeyboardButton(text='\\U0001F5B2 Заказать бота')\n button_1a = types.KeyboardButton(text='\\U0001F532 Услуги')\n button_1b = types.KeyboardButton(text='\\U0001F47E Роботы BotsApp')\n button_2a = types.KeyboardButton(text='\\U00002754 F.A.Q.',)\n button_2b = types.KeyboardButton(text='\\U0001F4F2 Контакты')\n keyboard.row(big_button)\n keyboard.row(button_1a, button_1b)\n keyboard.row(button_2a, button_2b)\n bot.send_message(message.chat.id,\n '\\U00002754 Узнайте что такое чат-боты Telegram\\n '\n '\\U0001F441 смотрите примеры наших роботов\\n '\n '\\U0001F916 заказывайте бота\\n '\n '\\U0001F4F2 мы всегда готовы ответить на любые ваши вопросы',\n reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda message: message.text == '❔ F.A.Q.', content_types=['text'])\n@bot.message_handler(func=lambda message: message.text == '🔙 🤖 Назад', content_types=['text'])\ndef send_faq(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_1 = types.KeyboardButton(text='Что умеют?')\n button_2 = types.KeyboardButton(text='Как заказать?')\n button_3 = types.KeyboardButton(text='Стоимость')\n button_4 = types.KeyboardButton(text='Поддержка')\n button_5 = types.KeyboardButton(text='Основное меню')\n keyboard.add(button_1, button_2, button_3, button_4, button_5)\n bot.send_message(message.chat.id,\n ' Что такое бот?\\n'\n '\\U0001F916 Telegram-бот – это:\\n'\n '● диалоговый агент, который имитирует осмысленную беседу без участия человека;\\n'\n '● помогает компаниям предоставить для пользователей персонализированный сервис '\n 'и увеличить лояльность к бренду;\\n'\n '● полноценный сотрудник любой организации, который работает круглосуточно и '\n 'которому не нужно платить.',\n reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Что умеют?', content_types=['text'])\ndef what_can_do(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_1 = types.KeyboardButton(text='\\U0001F519 \\U0001F916 Назад')\n button_2 = types.KeyboardButton(text='Основное меню')\n keyboard.add(button_1, button_2)\n bot.send_message(message.chat.id,\n 'Что умеют наши чат-боты?\\n'\n '● Предлагают пользователю любую форму(например,бронирование столика) и обрабатывают её\\n'\n '● Выводят каталоги в удобном виде:\\n'\n '\\U000027A1список услуг 
компании,\\n'\n '\\U000027A1часто задаваемые вопросы,\\n'\n '\\U000027A1каталог мобильных устройств и прочее\\n'\n '● Заменяют колл-центр вашей компании\\n'\n '● Оформляют рассылку (поскольку клиент сам добавляет бота в контакты личной переписки, то ни одно' \n 'ваше сообщение не останется без внимания)\\n'\n '● и многое другое.\\n\\n'\n 'У такого бота будет несколько основных козырей:\\n'\n '\\U00002705регулярная поддержка внимания клиента к продукту/услуге\\n'\n '\\U00002705автоматические экспресс-консультации и продажи прямо в личном чате Telegram\\n'\n '\\U00002705разработка бота выйдет дешевле, чем создание своего приложения, а эффективность от него'\n ' в нынешнее время больше',\n reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Стоимость', content_types=['text'])\ndef how_order(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_1 = types.KeyboardButton(text='\\U0001F519 \\U0001F916 Назад')\n button_2 = types.KeyboardButton(text='Основное меню')\n keyboard.add(button_1, button_2)\n bot.send_message(message.chat.id,\n 'Цена формируется в индивидуальном порядке исходя из требований и пожеланий наших заказчиков.'\n 'Создание чат-бота это работа не по шаблону и вкаждом конкретном случае всё зависит от\\n'\n '● целей и задач, которые должен решать бот,\\n'\n '● его функционала,\\n'\n '● наличия или отсутствия админ-панели,\\n'\n '● интеграции с внешним АPI,\\n'\n '● подключением CRM и/или иных систем,\\n'\n '● возможностью внутренней обработки и прогнозировки данных,\\n'\n '● подключением аналитических функций и прочего.\\n\\n'\n 'Иными словами,чем проще или сложнее вам потребуется бот, тем дешевле или дороже выйдет его стоимость. '\n 'Начальная стоимость, от 200$ долларов.\\n\\n'\n 'На всех этапах работы мы всегда готовы предоставить вам исчерпывающие консультации и помощь в составлении ТЗ,'\n ' а также предоставим лучшие рекомендации по внедрению бота в вашу работу \\U0001F60A',\n reply_markup=keyboard)\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True, interval=True)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "telebot.TeleBot", "line_number": 7, "usage_type": "call"}, {"api_name": "config.token", "line_number": 7, "usage_type": "attribute"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 17, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 17, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 18, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 18, "usage_type": "name"}, {"api_name": "mongodb.MongoAdd", "line_number": 29, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 30, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 30, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 31, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 31, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 32, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 32, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 33, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 33, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 34, 
"usage_type": "call"}, {"api_name": "telebot.types", "line_number": 34, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 35, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 35, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 50, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 50, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 51, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 51, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 52, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 52, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 53, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 53, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 54, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 54, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 55, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 55, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 70, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 70, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 71, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 71, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 72, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 72, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 95, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 95, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 96, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 96, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 97, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "98235830", "text": "import RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nfrom serial.utcp import UTCP\n\nser = serial.Serial(port=\"/dev/serial0\", baudrate=9600) # Open port with baud rate\nsender = UTCP(ser)\n \nSDA = 3\nSLC = 5\nTEC_Perisoltic = 7\nFan = 11\nPH_on = 13\nEC_on = 15\nDin = 19 #Pi4 output\nDout = 21 #Pi4 input\nSCLK = 23\nStep_CTRL = 29\nEC_Blue = 31\nEC_Red = 33\nRC_Grn = 35\nEC_Black = 37\n\nTX = 8\nRX = 10 # input \nheater = 12\nFloat = 16 #input\nwater_pump = 18\nAir_pump = 22\nADC_CS = 24 #put high\nPH_Blue = 32\nPH_Red = 36\nPH_Grn = 38\nPH_Black = 40 \nglobal control_pins\ncontrol_pins = [TEC_Perisoltic, Fan, PH_on, EC_on, Step_CTRL, heater, water_pump, Air_pump]\n\ndef off():\n global control_pins\n for pin in control_pins:\n GPIO.setup(pin, GPIO.OUT)\n \n for pin in control_pins:\n GPIO.output(pin, GPIO.LOW) \n\n for x in range(5): #turn off LEDs & solinoids\n sender.send(x, 4, 0) #solinoids\n sender.send(x, 5, 0) #red \n sender.send(x, 6, 0) #blue \n", "sub_path": "PI4/AtomgreensUI/Stop.py", "file_name": "Stop.py", "file_ext": "py", "file_size_in_byte": 1002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 2, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 2, 
"usage_type": "name"}, {"api_name": "RPi.GPIO.BOARD", "line_number": 2, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 3, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 3, "usage_type": "name"}, {"api_name": "serial.utcp.Serial", "line_number": 6, "usage_type": "call"}, {"api_name": "serial.utcp", "line_number": 6, "usage_type": "name"}, {"api_name": "serial.utcp.UTCP", "line_number": 7, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 41, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 41, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 44, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 44, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "243937690", "text": "from django import forms\nfrom django.core.validators import RegexValidator\n\nfrom .models import Account\nfrom django.contrib.auth.forms import UserCreationForm\nfrom projects.models import *\n\n\nclass UserRegisterationForm(UserCreationForm):\n email = forms.EmailField(required=True)\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n profile_picture = forms.ImageField(required=False)\n phone_regex = RegexValidator(regex=r'^[\\+02]?(01)(0|1|2|5)([0-9]{8})$',\n message=\"Sorry, Egyptian Phone numbers are only allowed.\")\n mobile = forms.CharField(validators=[phone_regex], max_length=14, required=False) # validators should be a list\n\n # facebook = forms.CharField(required=False, max_length=200)\n # public_info = forms.CharField(required=False, widget=forms.Textarea)\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'email', 'username', 'password1', 'password2', 'mobile', 'profile_picture']\n labels = {\n 'email': 'Email',\n 'first_name': 'First Name',\n 'last_name': 'Last Name',\n 'profile_picture': 'Profile Picture',\n }\n\n\nclass AccountUpdateForm(forms.ModelForm):\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'username', 'profile_picture', 'mobile', 'birthdate', 'country', 'facebook_profile']\n readonly_fields = ['email']\n\n def clean_email(self):\n if self.is_valid():\n email = self.cleaned_data['email']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(email=email)\n except Account.DoesNotExist:\n return email\n raise forms.ValidationError('email \"%s\" is already in use' % account.email)\n\n def clean_username(self):\n if self.is_valid():\n username = self.cleaned_data['username']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(username=username)\n except Account.DoesNotExist:\n return username\n raise forms.ValidationError('username \"%s\" is already in use' % account.username)\n \n def clean_firstname(self):\n if self.is_valid():\n first_name = self.cleaned_data['first_name']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(first_name=first_name)\n except Account.DoesNotExist:\n return first_name\n raise forms.ValidationError('first name \"%s\" is already in use' % account.first_name)\n\n def clean_lastname(self):\n if self.is_valid():\n last_name = self.cleaned_data['last_name']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(last_name=last_name)\n except Account.DoesNotExist:\n return last_name\n raise forms.ValidationError('last name \"%s\" is already in use' % 
account.last_name)\n\n", "sub_path": "authenticate/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.ImageField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Account", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Account", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 42, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 43, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 45, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 52, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 54, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 54, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 60, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 61, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 63, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 69, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 70, "usage_type": "attribute"}, 
{"api_name": "models.Account", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 72, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "478557542", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('report_builder', '0004_auto_20160906_1149'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='report',\n name='chart_type',\n field=models.IntegerField(null=True, blank=True),\n ),\n ]\n", "sub_path": "report_builder/migrations/0005_report_chart_type.py", "file_name": "0005_report_chart_type.py", "file_ext": "py", "file_size_in_byte": 422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "340434745", "text": "import ConfigSpace\nimport numpy as np\nimport numpy.random\n\ndef make_config_compatible(config, config_space):\n if isinstance(config, dict):\n config = config\n else:\n config = config.get_dictionary()\n\n # remove illegal values\n config = {k: v for k, v in config.items() \n if k in config_space.get_hyperparameter_names()\n and config_space.get_hyperparameter(k).is_legal(v)}\n\n # add values missing for current config space: random value\n for hp in config_space.get_hyperparameters():\n if hp.name not in config and isinstance(hp, ConfigSpace.hyperparameters.Constant):\n config[hp.name] = hp.value\n elif hp.name not in config:\n config[hp.name] = hp.sample(config_space.random)\n\n # delete values for inactive hyperparameters\n config = ConfigSpace.util.deactivate_inactive_hyperparameters(\n\t\t\t\t\t\t\t\t\tconfiguration_space=config_space,\n\t\t\t\t\t\t\t\t\tconfiguration=config)\n return ConfigSpace.Configuration(config_space, config)\n\ndef make_bw_compatible(bw, from_configspace, to_configspace):\n bw = insert_constant(bw, from_configspace)\n result = np.zeros(len(to_configspace.get_hyperparameter_names()))\n for i in range(len(bw)):\n j = transform_hyperparameter_index(i, from_configspace, to_configspace)\n if j is not None:\n result[j] = bw[i]\n return filter_constant(result, to_configspace)\n\ndef make_vector_compatible(vector, from_configspace, to_configspace, imputer):\n vector = np.asanyarray(vector)\n vector = insert_constant(vector, from_configspace)\n x = np.array(vector).reshape((-1, len(from_configspace.get_hyperparameters())))\n c = np.zeros((x.shape[0], len(to_configspace.get_hyperparameters()))) * np.nan\n\n # copy given values at correct index\n for i in range(x.shape[1]):\n j = transform_hyperparameter_index(i, from_configspace, to_configspace)\n if j is not None:\n c[:, j] = transform_hyperparameter(from_configspace, to_configspace, i, j, x[:, i])\n return imputer(filter_constant(c, to_configspace))\n\ndef transform_hyperparameter_index(idx, from_configspace, to_configspace):\n hp_name = from_configspace.get_hyperparameter_by_idx(idx)\n try:\n return 
to_configspace.get_idx_by_hyperparameter_name(hp_name)\n except:\n return None\n\ndef transform_hyperparameter(from_configspace, to_configspace, from_idx, to_idx, vector):\n from_hp = from_configspace.get_hyperparameter(from_configspace.get_hyperparameter_by_idx(from_idx))\n to_hp = to_configspace .get_hyperparameter(to_configspace .get_hyperparameter_by_idx(to_idx))\n result = np.ones(vector.shape) * np.nan\n for i, v in enumerate(vector):\n try:\n transformed = from_hp._transform(v)\n except:\n print(\"\\nvalue:\", v)\n print(\"hp:\", from_hp)\n print(\"to hp:\", to_hp)\n raise\n transformed = transformed[0] if isinstance(transformed, np.ndarray) else transformed\n if to_hp.is_legal(transformed):\n result[i] = to_hp._inverse_transform(transformed)\n return result\n\n\ndef num_non_constant_hps(cs):\n return np.sum(~constant_hypers(cs))\n\n\ndef filter_constant(array, cs):\n if len(array.shape) == 1:\n return array[~constant_hypers(cs)]\n else:\n return array[:, ~constant_hypers(cs)]\n\n\ndef constant_hypers(cs):\n constant_idxs = []\n for hyper in cs.get_hyperparameters():\n idx = cs.get_idx_by_hyperparameter_name(hyper.name)\n if is_constant(hyper):\n constant_idxs.append(idx)\n return np.array([i in constant_idxs for i in range(len(cs.get_hyperparameters()))])\n\n\ndef is_constant(hyper):\n if isinstance(hyper, ConfigSpace.hyperparameters.Constant):\n return True\n\n elif isinstance(hyper, ConfigSpace.hyperparameters.CategoricalHyperparameter):\n if len(hyper.choices) == 1:\n return True\n\n return False\n\n\ndef insert_constant(array, cs):\n if len(array.shape) == 1:\n result = np.zeros(len(cs.get_hyperparameters()))\n else:\n result = np.zeros((array.shape[0], len(cs.get_hyperparameters())))\n\n non_constant_pointer = 0\n for i, constant in enumerate(constant_hypers(cs)):\n if not constant and len(array.shape) == 1:\n result[i] = array[non_constant_pointer]\n non_constant_pointer += 1\n elif not constant:\n result[:, i] = array[:, non_constant_pointer]\n non_constant_pointer += 1\n return result\n", "sub_path": "hpbandster/metalearning/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 4490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ConfigSpace.hyperparameters", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ConfigSpace.util.deactivate_inactive_hyperparameters", "line_number": 24, "usage_type": "call"}, {"api_name": "ConfigSpace.util", "line_number": 24, "usage_type": "attribute"}, {"api_name": "ConfigSpace.Configuration", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "ConfigSpace.hyperparameters", "line_number": 97, "usage_type": "attribute"}, {"api_name": "ConfigSpace.hyperparameters", "line_number": 100, "usage_type": "attribute"}, {"api_name": 
"numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "475562155", "text": "from django.core.management.base import BaseCommand\n\nimport log\n\nfrom elections.models import District, DistrictCategory, Party\n\n\nclass Command(BaseCommand):\n help = \"Migrate data between existing models and initialize constants\"\n\n def handle(self, verbosity: int, **_kwargs):\n log.init(reset=True, verbosity=verbosity)\n\n self.initialize_parties()\n self.initialize_districts()\n\n def initialize_parties(self):\n for name, color in [\n # Placeholders\n (\"Nonpartisan\", '#999'),\n (\"No Party Affiliation\", '#999'),\n # Parties\n (\"Democratic\", '#3333FF'),\n (\"Green\", '#00A95C'),\n (\"Libertarian\", '#ECC850'),\n (\"Natural Law\", '#FFF7D6'),\n (\"Republican\", '#E81B23'),\n (\"U.S. Taxpayers\", '#A356DE'),\n (\"Working Class\", '#A30000'),\n ]:\n party, created = Party.objects.update_or_create(\n name=name, defaults=dict(color=color)\n )\n if created:\n self.stdout.write(f'Added party: {party}')\n\n def initialize_districts(self):\n state, created = DistrictCategory.objects.get_or_create(name=\"State\")\n if created:\n self.stdout.write(f'Added district category: {state}')\n\n for name in [\n # State\n \"County\",\n \"Jurisdiction\",\n \"Precinct\",\n # Local\n \"City\",\n \"District Library\",\n \"Local School\",\n \"Intermediate School\",\n \"Township\",\n \"Metropolitan\",\n \"Village\",\n \"Authority\",\n \"Library\",\n ]:\n category, created = DistrictCategory.objects.get_or_create(name=name)\n if created:\n self.stdout.write(f'Added district category: {category}')\n\n michigan, created = District.objects.get_or_create(\n category=state, name=\"Michigan\"\n )\n if created:\n self.stdout.write(f'Added district: {michigan}')\n", "sub_path": "elections/management/commands/migrate_data.py", "file_name": "migrate_data.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 8, "usage_type": "name"}, {"api_name": "log.init", "line_number": 12, "usage_type": "call"}, {"api_name": "elections.models.Party.objects.update_or_create", "line_number": 31, "usage_type": "call"}, {"api_name": "elections.models.Party.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "elections.models.Party", "line_number": 31, "usage_type": "name"}, {"api_name": "elections.models.DistrictCategory.objects.get_or_create", "line_number": 38, "usage_type": "call"}, {"api_name": "elections.models.DistrictCategory.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "elections.models.DistrictCategory", "line_number": 38, "usage_type": "name"}, {"api_name": "elections.models.DistrictCategory.objects.get_or_create", "line_number": 58, "usage_type": "call"}, {"api_name": "elections.models.DistrictCategory.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "elections.models.DistrictCategory", "line_number": 58, "usage_type": "name"}, {"api_name": "elections.models.District.objects.get_or_create", "line_number": 62, "usage_type": "call"}, {"api_name": "elections.models.District.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "elections.models.District", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "134713025", "text": "import os\nimport requests\nimport datetime as dt\nfrom twilio.rest import 
Client\n\n\nSTOCK = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\nSTOCK_FLUCTUATION_VALUE = 0.5 # %\nDATE_YESTERDAY = dt.date.today() - dt.timedelta(1)\nDATE_DAY_BEFORE_YESTERDAY = dt.date.today() - dt.timedelta(2)\n# API keys\nSTOCK_API_KEY = os.environ.get(\"STOCK_API_KEY\")\nNEWS_API_KEY = os.environ.get(\"NEWS_API_KEY\")\nSMS_API_KEY = os.environ.get(\"TWILIO_API_KEY\")\n# Twilio settings\nTWILIO_ACCOUNT_SID = os.environ.get(\"TWILIO_ACCOUNT_SID\")\nTWILIO_AUTH_TOKEN = os.environ.get(\"TWILIO_AUTH_TOKEN\")\n\n\n## STEP 1: Use https://www.alphavantage.co\n# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print(\"Get News\").\ndef get_stock_price_change():\n\n parameters = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": STOCK,\n \"apikey\": STOCK_API_KEY,\n }\n\n response = requests.get(url=\"https://www.alphavantage.co/query\", params=parameters)\n response.raise_for_status()\n content = response.json()[\"Time Series (Daily)\"]\n diference = float(content[str(DATE_YESTERDAY)][\"4. close\"]) - float(\n content[str(DATE_DAY_BEFORE_YESTERDAY)][\"4. close\"]\n )\n variation = (diference / float(content[str(DATE_YESTERDAY)][\"4. close\"])) * 100\n return round(variation, 2)\n\n\n## STEP 2: Use https://newsapi.org\n# Instead of printing (\"Get News\"), actually get the first 3 news pieces for the COMPANY_NAME.\ndef get_company_news():\n\n parameters = {\n \"q\": COMPANY_NAME,\n \"from\": DATE_YESTERDAY,\n \"sortBy\": \"popularity\",\n \"apikey\": NEWS_API_KEY,\n }\n\n response = requests.get(url=\"https://newsapi.org/v2/everything\", params=parameters)\n response.raise_for_status()\n content = response.json()[\"articles\"]\n return f\"Headline: {content[0]['title']}\\nBrief: {content[0]['description']}\\nLink: {content[0]['url']}\"\n\n\n## STEP 3: Use https://www.twilio.com\n# Send a seperate message with the percentage change and each article's title and description to your phone number.\ndef send_sms(stock_fluctuation, news):\n\n if stock_fluctuation > 0:\n content = f\"{STOCK}: ⬆️{abs(stock_fluctuation)}%\\n{news}\"\n else:\n content = f\"{STOCK}: ⬇️{abs(stock_fluctuation)}%\\n{news}\"\n\n client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)\n message = client.messages.create(\n body=content,\n from_=\"\", # Twilio free trial phone number\n to=\"\", # Your phone number, that was used in twilio\n )\n print(message.sid)\n\n\nif __name__ == \"__main__\":\n stock_fluctuation = get_stock_price_change()\n print(stock_fluctuation)\n if abs(stock_fluctuation) >= STOCK_FLUCTUATION_VALUE:\n news = get_company_news()\n print(news)\n send_sms(stock_fluctuation, news)\n", "sub_path": "python/100_Days_of_Code/Intermediate+/day36/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.date.today", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ.get", 
"line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "twilio.rest.Client", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "310271753", "text": "from django.urls import path\nfrom . import views\n\napp_name = \"blog\"\n\nurlpatterns = [\n path('', views.index_view, name=\"index\"),\n path('new_post/', views.new_post_view, name='new_post'),\n path('list/', views.PostListView.as_view(), name='list'),\n path('list//', views.PostDetailView.as_view(), name='read_post'),\n path('create/', views.PostCreateView.as_view(), name='create'),\n path('update//', views.PostUpdateView.as_view(), name='update'),\n path('delete//', views.PostDeleteView.as_view(), name='delete'),\n]\n", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "53710815", "text": "import re\r\n\r\nfrom django.db.transaction import set_autocommit, rollback, commit\r\n\r\nfrom account.models import City, School, Tag, Profile, ProfileHighSchoolInfo, \\\r\n ProfileCollegeInfo, ProfileTagVote, ContactRecord\r\n\r\n\r\ndef bulk_create(file):\r\n from openpyxl import load_workbook\r\n \r\n wb = load_workbook(file, use_iterators = True)\r\n ws = wb.worksheets[0]\r\n row_counter = 1\r\n \r\n city_dict = dict([(city.name, city) for city in City.objects.all()])\r\n high_school_dict = dict([(s.name, s) for s in School.objects.filter(type=School.HIGH_SCHOOL_TYPE_INDEX)])\r\n college_dict = dict([(s.name, s) for s in School.objects.filter(type=School.COLLEGE_TYPE_INDEX)])\r\n tag_dict = dict([(t.name, t) for t in Tag.objects.all()])\r\n num_extract_regex = re.compile('(?<=[^\\d])\\d+(?=[^\\d])')\r\n mobile_profile_dict = {}\r\n index_profile_dict = {}\r\n raw_encrypted_dict = {}\r\n def set_mobile_field(profile, raw_mobile_num, mobile_field_name): \r\n if raw_mobile_num:\r\n profile.set_mobile(raw_mobile_num, mobile_field_name)\r\n raw_encrypted_dict[raw_mobile_num] = getattr(profile, mobile_field_name)\r\n \r\n def save_profile_dict(profile, mobile_field_name, mobile_num):\r\n if getattr(profile, mobile_field_name):\r\n mobile_profile_dict[mobile_num] = profile\r\n\r\n set_autocommit(False)\r\n for row in ws.iter_rows(row_offset=1): # it brings a new method: iter_rows()\r\n row_counter = row_counter + 1\r\n 
try:\r\n p = Profile()\r\n set_mobile_field(p, row[0].value, Profile.MOBBILE0_FIELD)\r\n set_mobile_field(p, row[1].value, Profile.MOBBILE1_FIELD)\r\n set_mobile_field(p, row[2].value, Profile.MOBBILE2_FIELD)\r\n p.date_of_birth = row[3].value\r\n p.save()\r\n save_profile_dict(p, Profile.MOBBILE0_FIELD, row[0].value)\r\n save_profile_dict(p, Profile.MOBBILE1_FIELD, row[1].value)\r\n save_profile_dict(p, Profile.MOBBILE2_FIELD, row[2].value)\r\n index_profile_dict[row_counter] = p\r\n if city_dict.get(row[4].value, None):\r\n p.cities.add(city_dict.get(row[4].value))\r\n if city_dict.get(row[5].value, None):\r\n p.cities.add(city_dict.get(row[5].value))\r\n if high_school_dict.get(row[6].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': high_school_dict.get(row[6].value),\r\n 'date_joined': row[7].value,\r\n 'date_graduated': row[8].value,\r\n }\r\n phsi = ProfileHighSchoolInfo(**kwarg)\r\n phsi.save()\r\n if high_school_dict.get(row[9].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': high_school_dict.get(row[9].value),\r\n 'date_joined': row[10].value,\r\n 'date_graduated': row[11].value,\r\n }\r\n phsi = ProfileHighSchoolInfo(**kwarg)\r\n phsi.save()\r\n if college_dict.get(row[12].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': college_dict.get(row[12].value),\r\n 'date_joined': row[13].value,\r\n 'date_graduated': row[14].value,\r\n }\r\n pci = ProfileCollegeInfo(**kwarg)\r\n pci.save()\r\n if college_dict.get(row[15].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': college_dict.get(row[15].value),\r\n 'date_joined': row[16].value,\r\n 'date_graduated': row[17].value,\r\n }\r\n pci = ProfileCollegeInfo(**kwarg)\r\n pci.save()\r\n if college_dict.get(row[18].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': college_dict.get(row[18].value),\r\n 'date_joined': row[19].value,\r\n 'date_graduated': row[20].value,\r\n }\r\n pci = ProfileCollegeInfo(**kwarg)\r\n pci.save()\r\n \r\n tags = re.split('[,， ]+', row[21].value)\r\n for tag in tags:\r\n tag = tag.strip()\r\n if not tag:\r\n continue\r\n vote = num_extract_regex.findall(tag)\r\n vote = int(vote[0]) if len(vote) else 0\r\n tag = re.split('[\\(（ ]+', tag)[0]\r\n if tag_dict.get(tag, None):\r\n ptv = ProfileTagVote(profile=p, tag=tag_dict.get(tag), count=vote)\r\n ptv.save()\r\n except Exception as e:\r\n e.row_counter = row_counter\r\n rollback()\r\n raise e\r\n \r\n row_counter = 1\r\n for row in ws.iter_rows(row_offset=1): # it brings a new method: iter_rows()\r\n row_counter = row_counter + 1\r\n try:\r\n mobile_nums = re.split('[,， ]+', row[22].value)\r\n for mobile_num in mobile_nums:\r\n mobile_num = mobile_num.strip()\r\n if mobile_profile_dict.get(mobile_num):\r\n ContactRecord(from_profile=index_profile_dict.get(row_counter),\r\n to_profile=mobile_profile_dict.get(mobile_num),\r\n encrypted_mobile=raw_encrypted_dict.get(mobile_num)).save()\r\n except Exception as e:\r\n e.row_counter = row_counter\r\n rollback()\r\n raise e\r\n \r\n commit()\r\n return row_counter\r\n", "sub_path": "account/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 12, "usage_type": "call"}, {"api_name": "account.models.City.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "account.models.City.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "account.models.City",
"line_number": 16, "usage_type": "name"}, {"api_name": "account.models.School.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "account.models.School.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "account.models.School", "line_number": 17, "usage_type": "name"}, {"api_name": "account.models.School.HIGH_SCHOOL_TYPE_INDEX", "line_number": 17, "usage_type": "attribute"}, {"api_name": "account.models.School.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "account.models.School.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "account.models.School", "line_number": 18, "usage_type": "name"}, {"api_name": "account.models.School.COLLEGE_TYPE_INDEX", "line_number": 18, "usage_type": "attribute"}, {"api_name": "account.models.Tag.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "account.models.Tag.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "account.models.Tag", "line_number": 19, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.transaction.set_autocommit", "line_number": 33, "usage_type": "call"}, {"api_name": "account.models.Profile", "line_number": 37, "usage_type": "call"}, {"api_name": "account.models.Profile.MOBBILE0_FIELD", "line_number": 38, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 38, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE1_FIELD", "line_number": 39, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 39, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE2_FIELD", "line_number": 40, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 40, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE0_FIELD", "line_number": 43, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 43, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE1_FIELD", "line_number": 44, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 44, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE2_FIELD", "line_number": 45, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 45, "usage_type": "name"}, {"api_name": "account.models.ProfileHighSchoolInfo", "line_number": 58, "usage_type": "call"}, {"api_name": "account.models.ProfileHighSchoolInfo", "line_number": 67, "usage_type": "call"}, {"api_name": "account.models.ProfileCollegeInfo", "line_number": 76, "usage_type": "call"}, {"api_name": "account.models.ProfileCollegeInfo", "line_number": 85, "usage_type": "call"}, {"api_name": "account.models.ProfileCollegeInfo", "line_number": 94, "usage_type": "call"}, {"api_name": "re.split", "line_number": 97, "usage_type": "call"}, {"api_name": "re.split", "line_number": 104, "usage_type": "call"}, {"api_name": "account.models.ProfileTagVote", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.transaction.rollback", "line_number": 110, "usage_type": "call"}, {"api_name": "re.split", "line_number": 117, "usage_type": "call"}, {"api_name": "account.models.ContactRecord", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.transaction.rollback", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.transaction.commit", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "114313541", "text": "from threading import 
Thread\nfrom threading import Lock\nfrom colorlog import ColoredFormatter\nimport logging\n\n\nformatter = ColoredFormatter(\n\t'%(log_color)s[%(asctime)-8s] %(module)s (%(process)d %(threadName)s): %(message_log_color)s%(message)s',\n\tdatefmt=None,\n\treset=True,\n\tlog_colors={\n\t\t'DEBUG': 'blue',\n\t\t'INFO': 'green',\n\t\t'WARNING': 'yellow',\n\t\t'ERROR': 'red',\n\t\t'CRITICAL': 'red',\n\t},\n\tsecondary_log_colors={\n\t\t'message': {\n\t\t\t'DEBUG': 'purple',\n\t\t\t'INFO': 'yellow',\n\t\t\t'WARNING': 'green',\n\t\t\t'ERROR': 'yellow',\n\t\t\t'CRITICAL': 'red',\n\t\t}\n\t},\n\tstyle = '%'\n)\n\nstream = logging.StreamHandler()\nstream.setFormatter(formatter)\n\nlogger = logging.getLogger('pastry.py')\nlogger.addHandler(stream)\nlogger.setLevel (logging.DEBUG)\n\n\nclass Node:\n\tdef __init__ (self, port, auth):\n\t\tself.port = port\n\t\tself.auth = auth\n\t\tlogger.debug ('listening on port %d', self.port)\n\n\tdef bootstrap (self, seeds = []):\n\t\tfor seed in seeds:\n\t\t\tlogger.debug ('bootstraping from %s', str (seed))\n\n\tdef get (self, key):\n\t\tpass\n\n\tdef set (self, key, data):\n\t\tpass\n\n\n\t\n\t\n", "sub_path": "pastry/node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 1071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "colorlog.ColoredFormatter", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "73944803", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 27 21:02:50 2018\r\n\r\n@author: CS\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport numpy as np\r\nnp.random.seed(1337) # for reproducibility\r\n\r\nfrom keras.datasets import mnist\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation\r\nfrom keras.optimizers import SGD, Adam, RMSprop\r\nfrom keras.utils import np_utils\r\nfrom keras.callbacks import ModelCheckpoint\r\nimport tensorflow as tf \r\nfrom keras import backend as k\r\n\r\n'''def categorical_hinge(y_true, y_pred):\r\n pos = k.sum(y_true * y_pred, axis=-1)\r\n neg =k.max((1.0 - y_true) * y_pred, axis=-1)\r\n return k.mean(k.maximum(0.0, neg - pos + 1), axis=-1)\r\n'''\r\n\r\n\r\n# Data Preparing\r\n\r\nbatch_size = 128\r\nnr_classes = 10 #62\r\nnr_iterations = 100\r\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\r\n\r\nX_train = X_train.reshape(60000, 784) #Done\r\nX_test = X_test.reshape(10000, 784) #Done\r\nX_train = X_train.astype('float32') #Done\r\nX_test = X_test.astype('float32') #Done\r\nX_train /= 255 #Done\r\nX_test /= 255 #Done\r\n\r\n\r\nY_train = np_utils.to_categorical(y_train, nr_classes) # ??\r\nY_test = np_utils.to_categorical(y_test, nr_classes) # ??\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(10, input_shape=(784,)))\r\n\r\nX_val=X_train[0:10000,:]\r\nY_val=Y_train[0:10000,:]\r\n\r\nX_train=X_train[10000:60000,:]\r\nY_train=Y_train[10000:60000,:]\r\n\r\n\r\nmodel.add(Activation('softmax'))\r\nmodel.summary()\r\nmodel.compile(loss='hinge',\r\n optimizer='sgd',\r\n metrics=['accuracy'])\r\n\r\nsaved_weights_name='SVMWeights.h5'\r\n\r\ncheckpoint = ModelCheckpoint(saved_weights_name, \r\n monitor='val_acc', \r\n verbose=1, \r\n save_best_only=True, \r\n mode='max')\r\n\r\nhistory = model.fit(X_train, Y_train,\r\n 
batch_size = batch_size, nb_epoch = nr_iterations,\r\n verbose = 1, validation_data = (X_val, Y_val) ,callbacks=[checkpoint])\r\n\r\nscore = model.evaluate(X_test, Y_test, verbose = 0)", "sub_path": "SVM.py", "file_name": "SVM.py", "file_ext": "py", "file_size_in_byte": 2101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "keras.datasets.mnist.load_data", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 33, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 44, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "215193213", "text": "#!/usr/bin/env python3\nfrom setuptools import setup\n\nwith open(\"README.md\", encoding='utf8') as readme:\n long_description = readme.read()\n\nsetup(\n name=\"HawkEye\",\n version=\"1.0\",\n author=\"Abdallah Elshinbary\",\n url=\"https://github.com/N1ght-W0lf/HawkEye\",\n description=(\"Malware dynamic instrumentation tool based on frida framework\"),\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"psutil\",\n \"frida\",\n ],\n packages=[\"hawkeye\",\n ],\n entry_points={\n \"console_scripts\": [\n \"hawkeye = hawkeye.HawkEye:main\",\n ],\n },\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "497120263", "text": "from django import forms\nfrom django.forms import SelectDateWidget\nfrom ipdb import set_trace\n\nfrom blog.models import Post\n\n\nclass CreatePostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title', 'text', 'published_date')\n\n def __init__(self, user, *args, **kwargs):\n super().__init__(auto_id='custom_id_%s', *args, **kwargs)\n self.user = user\n self.fields['published_date'].widget = SelectDateWidget()\n self.fields['title'].widget = forms.TextInput(attrs={'placeholder': 'place title here'})\n self.fields['title'].initial = \"POST No: {}\".format(Post.objects.count() + 1)\n\n def save(self, commit=True):\n post = self.instance\n post.author = self.user\n super().save()\n\n\nclass UpdatePostForm(forms.ModelForm):\n\n class Meta:\n model = Post\n fields = ('title', 'text', 'published_date')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['published_date'].widget = SelectDateWidget()\n\n\n\nfrom blog.signals import *\n", "sub_path": "blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "blog.models.Post", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.SelectDateWidget", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms.TextInput", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "blog.models.Post.objects.count", "line_number": 18, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "blog.models.Post", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.SelectDateWidget", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "401234362", "text": "#!/usr/bin/python\nimport argparse\nimport json\nimport web3\nimport sys\nimport logging\n# import pymongo\nimport progressbar\n\nfrom pymongo import MongoClient\nfrom bson import Decimal128\n\nfrom mnemonic import Mnemonic\n\nfrom datetime import datetime\nfrom web3 import Web3\nfrom hexbytes import HexBytes\n\nfrom helper import query_yes_no\n\nlogging.basicConfig(level=logging.INFO)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-d', '--database', type=str, help='Name of the MongoDB database', required=True)\nparser.add_argument('-s', '--start-block', type=int, help='Start block')\nparser.add_argument('-e', '--end-block', type=int, help='End block')\nparser.add_argument('--drop', action='store_true', help='Drop existing DB before scraping')\nparser.add_argument('--skip-confirmation', action='store_true', help='Skip asking for confirmation for dropping the DB')\n\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-a', '--addr', type=str, help='Comma-separated list of addresses from and to which txs will be filtered')\ngroup.add_argument('-f', '--file', type=str, help='File containing addresses from and to which txs will be filtered')\n\n\ndef tx_to_dict(tx):\n result = {}\n for key, val in tx.items():\n if isinstance(val, HexBytes):\n result[key] = val.hex()\n else:\n result[key] = val\n\n if 'value' in result: result['value'] = Decimal128(str(result['value']))\n if 'gasPrice' in result: result['gasPrice'] = Decimal128(str(result['gasPrice']))\n\n return result\n\ndef block_to_dict(tx):\n result = {}\n for key, val in tx.items():\n if isinstance(val, HexBytes):\n result[key] = val.hex()\n else:\n result[key] = val\n\n if 'difficulty' in result: result['difficulty'] = Decimal128(str(result['difficulty']))\n if 'totalDifficulty' in result: result['totalDifficulty'] = Decimal128(str(result['totalDifficulty']))\n\n return result\n\n\ndef __main__():\n args = parser.parse_args()\n\n provider = Web3.WebsocketProvider('wss://mainnet.infura.io/ws/')\n # provider = Web3.HTTPProvider('https://mainnet.infura.io/')\n # provider = Web3.IPCProvider()\n w3 = Web3(provider)\n\n if args.start_block:\n start_block = args.start_block\n else:\n start_block = 0\n\n if args.end_block:\n end_block = args.end_block\n else:\n end_block = w3.eth.blockNumber\n\n client = MongoClient()\n\n dbnames = client.list_database_names()\n\n if args.drop and args.database in dbnames:\n if not 
args.skip_confirmation:\n if not query_yes_no('Are you sure you want to drop existing DB: '+args.database, default='no'):\n sys.exit()\n\n client.drop_database(args.database)\n\n db = client[args.database]\n\n block_collection = db['blocks']\n tx_collection = db['transactions']\n txreceipt_collection = db['txreceipts']\n\n filtered_addrs = []\n if args.addr:\n filtered_addrs += args.addr.split(',')\n elif args.file:\n filtered_addrs += open(args.file, 'r').read().split('\\n')\n\n filtered_addrs = [i.lower() for i in filtered_addrs if Web3.isAddress(i)]\n\n bar = progressbar.ProgressBar(max_value=end_block-start_block)\n\n tx_count = 0\n\n for idx in range(start_block, end_block+1):\n bar.update(idx-start_block)\n\n block = w3.eth.getBlock(idx, full_transactions=True)\n\n block_without_tx = block_to_dict(block)\n if 'transactions' in block_without_tx:\n del block_without_tx['transactions']\n\n block_collection.insert_one(block_without_tx)\n\n txs = block.transactions\n\n lines = []\n\n for n, tx in enumerate(txs):\n if tx['to']:\n to_matches = tx['to'].lower() in filtered_addrs\n else:\n to_matches = False\n\n if tx['from']:\n from_matches = tx['from'].lower() in filtered_addrs\n else:\n from_matches = False\n\n if to_matches or from_matches or filtered_addrs == []:\n # print('Found tx: %s'%tx['hash'].hex())\n\n tx_collection.insert_one(tx_to_dict(tx))\n\n tx_receipt = w3.eth.getTransactionReceipt(tx['hash'])\n txreceipt_collection.insert_one(tx_to_dict(tx_receipt))\n\n tx_count += 1\n\n bar.finish()\n txreceipt_collection.create_index('transactionHash')\n\n logging.info('Finished importing %d txs from %d blocks'%(tx_count, end_block-start_block))\n\n # if len(lines) > 0:\n # if args.readable:\n # ofile.write('// Block %d at %s including %d txs, %d unique addresses, diversity: %d%%, gas used: %d\\n'%(block.number, datetime.fromtimestamp(block.timestamp), len(block.transactions), len(unique_addresses), diversity*100, block.gasUsed))\n\n\nif __name__ == '__main__':\n __main__()\n", "sub_path": "scrape_txs_node.py", "file_name": "scrape_txs_node.py", "file_ext": "py", "file_size_in_byte": 4819, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "hexbytes.HexBytes", "line_number": 38, "usage_type": "argument"}, {"api_name": "bson.Decimal128", "line_number": 43, "usage_type": "call"}, {"api_name": "bson.Decimal128", "line_number": 44, "usage_type": "call"}, {"api_name": "hexbytes.HexBytes", "line_number": 51, "usage_type": "argument"}, {"api_name": "bson.Decimal128", "line_number": 56, "usage_type": "call"}, {"api_name": "bson.Decimal128", "line_number": 57, "usage_type": "call"}, {"api_name": "web3.Web3.WebsocketProvider", "line_number": 65, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 65, "usage_type": "name"}, {"api_name": "web3.Web3", "line_number": 68, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 80, "usage_type": "call"}, {"api_name": "helper.query_yes_no", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 87, "usage_type": "call"}, {"api_name": "web3.Web3.isAddress", "line_number": 103, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 103, "usage_type": "name"}, {"api_name": 
"progressbar.ProgressBar", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "195318413", "text": "import luigi\n\nfrom ...abstract_method_exception import AbstractMethodException\nfrom ...lib.test_environment.populate_data import PopulateEngineSmallTestDataToDatabase\nfrom ...lib.test_environment.upload_exa_jdbc import UploadExaJDBC\nfrom ...lib.test_environment.upload_virtual_schema_jdbc_adapter import UploadVirtualSchemaJDBCAdapter\nfrom ...lib.base.dependency_logger_base_task import DependencyLoggerBaseTask\nfrom ...lib.data.container_info import ContainerInfo\nfrom ...lib.data.database_credentials import DatabaseCredentialsParameter\nfrom ...lib.data.database_info import DatabaseInfo\nfrom ...lib.data.docker_network_info import DockerNetworkInfo\nfrom ...lib.data.environment_info import EnvironmentInfo\nfrom ...lib.test_environment.general_spawn_test_environment_parameter import \\\n GeneralSpawnTestEnvironmentParameter\nfrom ...lib.test_environment.spawn_test_container import SpawnTestContainer\n\nDATABASE = \"database\"\n\nTEST_CONTAINER = \"test_container\"\n\n\nclass AbstractSpawnTestEnvironment(DependencyLoggerBaseTask,\n GeneralSpawnTestEnvironmentParameter,\n DatabaseCredentialsParameter):\n environment_name = luigi.Parameter()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.test_container_name = f\"\"\"test_container_{self.environment_name}\"\"\"\n self.network_name = f\"\"\"db_network_{self.environment_name}\"\"\"\n\n def get_environment_type(self):\n raise AbstractMethodException()\n\n def run_task(self):\n test_environment_info = yield from self._attempt_database_start()\n yield from self._setup_test_database(test_environment_info)\n self.return_object(test_environment_info)\n\n def _attempt_database_start(self):\n is_database_ready = False\n attempt = 0\n database_info = None\n test_container_info = None\n while not is_database_ready and attempt < self.max_start_attempts:\n network_info, database_info, is_database_ready, test_container_info = \\\n yield from self._start_database(attempt)\n attempt += 1\n if not is_database_ready and not attempt < self.max_start_attempts:\n raise Exception(f\"Maximum attempts {attempt} to start the database reached.\")\n test_environment_info = \\\n EnvironmentInfo(name=self.environment_name,\n env_type=self.get_environment_type(),\n database_info=database_info,\n test_container_info=test_container_info,\n network_info=network_info)\n return test_environment_info\n\n def _start_database(self, attempt):\n network_info = yield from self._create_network(attempt)\n database_info, test_container_info = \\\n yield from self._spawn_database_and_test_container(network_info, attempt)\n is_database_ready = yield from self._wait_for_database(\n database_info, test_container_info, attempt)\n return network_info, database_info, is_database_ready, test_container_info\n\n def _create_network(self, attempt):\n network_info_future = yield from self.run_dependencies(self.create_network_task(attempt))\n network_info = self.get_values_from_future(network_info_future)\n return network_info\n\n def create_network_task(self, attempt: int):\n raise AbstractMethodException()\n\n def _spawn_database_and_test_container(self,\n network_info: DockerNetworkInfo,\n attempt: int):\n database_and_test_container_info_future = \\\n yield from self.run_dependencies({\n TEST_CONTAINER: SpawnTestContainer(\n environment_name=self.environment_name,\n 
test_container_name=self.test_container_name,\n network_info=network_info,\n ip_address_index_in_subnet=1,\n reuse_test_container=self.reuse_test_container,\n no_test_container_cleanup_after_end=self.no_test_container_cleanup_after_end,\n attempt=attempt),\n DATABASE: self.create_spawn_database_task(network_info, attempt)\n })\n database_and_test_container_info = \\\n self.get_values_from_futures(database_and_test_container_info_future)\n test_container_info = database_and_test_container_info[TEST_CONTAINER]\n database_info = database_and_test_container_info[DATABASE]\n return database_info, test_container_info\n\n def create_spawn_database_task(self,\n network_info: DockerNetworkInfo,\n attempt: int):\n raise AbstractMethodException()\n\n def _wait_for_database(self,\n database_info: DatabaseInfo,\n test_container_info: ContainerInfo,\n attempt: int):\n database_ready_target_future = \\\n yield from self.run_dependencies(\n self.create_wait_for_database_task(\n attempt, database_info, test_container_info))\n is_database_ready = self.get_values_from_futures(database_ready_target_future)\n return is_database_ready\n\n def create_wait_for_database_task(self,\n attempt: int,\n database_info: DatabaseInfo,\n test_container_info: ContainerInfo):\n raise AbstractMethodException()\n\n def _setup_test_database(self, test_environment_info: EnvironmentInfo):\n # TODO check if database is setup\n if self.is_setup_database_activated:\n self.logger.info(\"Setup database\")\n upload_tasks = [\n self.create_child_task_with_common_params(\n UploadExaJDBC,\n test_environment_info=test_environment_info,\n reuse_uploaded=self.reuse_database_setup),\n self.create_child_task_with_common_params(\n UploadVirtualSchemaJDBCAdapter,\n test_environment_info=test_environment_info,\n reuse_uploaded=self.reuse_database_setup),\n self.create_child_task_with_common_params(\n PopulateEngineSmallTestDataToDatabase,\n test_environment_info=test_environment_info,\n reuse_data=self.reuse_database_setup\n )]\n yield from self.run_dependencies(upload_tasks)\n", "sub_path": "src/lib/test_environment/abstract_spawn_test_environment.py", "file_name": "abstract_spawn_test_environment.py", "file_ext": "py", "file_size_in_byte": 6644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "lib.base.dependency_logger_base_task.DependencyLoggerBaseTask", "line_number": 22, "usage_type": "name"}, {"api_name": "lib.test_environment.general_spawn_test_environment_parameter.GeneralSpawnTestEnvironmentParameter", "line_number": 23, "usage_type": "name"}, {"api_name": "lib.data.database_credentials.DatabaseCredentialsParameter", "line_number": 24, "usage_type": "name"}, {"api_name": "luigi.Parameter", "line_number": 25, "usage_type": "call"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.data.environment_info.EnvironmentInfo", "line_number": 52, "usage_type": "call"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 73, "usage_type": "call"}, {"api_name": "lib.data.docker_network_info.DockerNetworkInfo", "line_number": 76, "usage_type": "name"}, {"api_name": "lib.test_environment.spawn_test_container.SpawnTestContainer", "line_number": 80, "usage_type": "call"}, {"api_name": "lib.data.docker_network_info.DockerNetworkInfo", "line_number": 97, "usage_type": "name"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 99, "usage_type": 
"call"}, {"api_name": "lib.data.database_info.DatabaseInfo", "line_number": 102, "usage_type": "name"}, {"api_name": "lib.data.container_info.ContainerInfo", "line_number": 103, "usage_type": "name"}, {"api_name": "lib.data.database_info.DatabaseInfo", "line_number": 114, "usage_type": "name"}, {"api_name": "lib.data.container_info.ContainerInfo", "line_number": 115, "usage_type": "name"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 116, "usage_type": "call"}, {"api_name": "lib.data.environment_info.EnvironmentInfo", "line_number": 118, "usage_type": "name"}, {"api_name": "lib.test_environment.upload_exa_jdbc.UploadExaJDBC", "line_number": 124, "usage_type": "argument"}, {"api_name": "lib.test_environment.upload_virtual_schema_jdbc_adapter.UploadVirtualSchemaJDBCAdapter", "line_number": 128, "usage_type": "argument"}, {"api_name": "lib.test_environment.populate_data.PopulateEngineSmallTestDataToDatabase", "line_number": 132, "usage_type": "argument"}]} +{"seq_id": "171826920", "text": "import os\nimport unittest\n\nfrom google.appengine.ext import testbed\n\n\nclass EnvVarsTestCase(unittest.TestCase):\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.setup_env(\n app_id='your-app-id',\n my_config_setting='example',\n overwrite=True)\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def testEnvVars(self):\n assert os.environ['APPLICATION_ID'] == 'your-app-id'\n assert os.environ['MY_CONFIG_SETTING'] == 'example'\n\nif __name__ == '__main__':\n unittest.main()", "sub_path": "localtesting/test_env_vars.py", "file_name": "test_env_vars.py", "file_ext": "py", "file_size_in_byte": 596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.testbed.Testbed", "line_number": 9, "usage_type": "call"}, {"api_name": "google.appengine.ext.testbed", "line_number": 9, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "437665020", "text": "\"\"\"\nFlo Smart Home Water Control System for Home Assistant\nSee https://github.com/rsnodgrass/hass-flo-water\n\nFor good example of update, see Leaf sensor/switch:\nhttps://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/nissan_leaf/__init__.py\n\"\"\"\nimport logging\nimport json\nimport requests\nimport time\nimport datetime\nimport voluptuous as vol\nfrom requests.exceptions import HTTPError, ConnectTimeout\n\nfrom homeassistant.helpers import discovery\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.const import (\n CONF_USERNAME, CONF_PASSWORD, CONF_NAME, CONF_SCAN_INTERVAL)\nimport homeassistant.helpers.config_validation as cv\n\nfrom pyflowater import PyFlo\n\nLOG = logging.getLogger(__name__)\n\nFLO_DOMAIN = 'flo'\nFLO_SERVICE = 'flo_service'\n\nNOTIFICATION_ID = 'flo_notification'\n\nCONF_AUTO_DISCOVER = 'discovery'\nCONF_LOCATION_ID = 'location_id'\nCONF_STARTDATE = 'startdate'\n\nCONFIG_SCHEMA = vol.Schema({\n FLO_DOMAIN: vol.Schema({\n vol.Required(CONF_USERNAME): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n # location_id: [ , , ... 
]\n vol.Optional(CONF_LOCATION_ID): cv.ensure_list,\n vol.Optional(CONF_STARTDATE): cv.string\n })\n}, extra=vol.ALLOW_EXTRA)\n\n# cache expiry in minutes; TODO: make this configurable (with a minimum to prevent DDoS)\nFLO_CACHE_EXPIRY = 10\n\n\ndef setup(hass, config):\n \"\"\"Set up the Flo Water Control System\"\"\"\n\n conf = config[FLO_DOMAIN]\n username = conf.get(CONF_USERNAME)\n password = conf.get(CONF_PASSWORD)\n\n try:\n flo = PyFlo(username, password)\n if not flo.is_connected:\n LOG.error(f\"Could not connect to Flo service with user {username}\")\n return False\n\n # save password to enable automatic re-authentication while this HA instance is running\n flo.save_password(password)\n\n hass.data[FLO_SERVICE] = flo\n\n except (ConnectTimeout, HTTPError) as ex:\n LOG.error(f\"Unable to connect to Flo service: {str(ex)}\")\n hass.components.persistent_notification.create(\n f\"Error: {ex}
You will need to restart Home Assistant after fixing.\",\n title='Flo', notification_id=NOTIFICATION_ID\n )\n return False\n\n location_ids = conf.get(CONF_LOCATION_ID)\n startdate = conf.get(CONF_STARTDATE)\n\n # if no location is specified, this will auto discover ALL Flo locations/devices and add them to Home Assistant\n if location_ids == None:\n location_ids = []\n for location in flo.locations():\n location_ids.append(location['id'])\n LOG.info(\n f\"Discovered Flo location {location['id']} ({location['nickname']})\")\n\n # create sensors/switches for all configured locations\n for location_id in location_ids:\n discovery_info = {CONF_LOCATION_ID: location_id,\n CONF_STARTDATE: startdate}\n for component in ['switch', 'binary_sensor', 'sensor']:\n discovery.load_platform(\n hass, component, FLO_DOMAIN, discovery_info, config)\n\n return True\n\n\nclass FloEntity(Entity):\n \"\"\"Base Entity class for Flo water inflow control device\"\"\"\n\n def __init__(self, hass, device_id):\n \"\"\"Store service upon init.\"\"\"\n self._hass = hass\n self._flo = hass.data[FLO_SERVICE]\n self._device_id = device_id\n self._attrs = {\n 'device_id': device_id\n }\n\n @property\n def name(self):\n \"\"\"Return the display name for this sensor\"\"\"\n return self._name\n\n @property\n def should_poll(self):\n \"\"\"A coordinate manually updates all the sensors, so ensure polling ON for HA to detect state changes!\"\"\"\n # FIXME: we could make these dependent sensors not be polling, since the coordinator could let HA know what changes\n return True\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the device state attributes.\"\"\"\n return self._attrs\n\n @property\n def device_key(self):\n return f\"flo_device_{self._device_id}\"\n\n @property\n def device_state(self):\n return self._hass.data.get(self.device_key)\n\n def get_telemetry(self, field):\n if self.device_state:\n telemetry = self.device_state['telemetry']\n current_states = telemetry['current']\n return current_states[field]\n else:\n return None\n\n def update_state(self, state):\n self._state = state\n\n # For debugging, mark the attribute with current timestamp to indicate updated\n if self._attrs:\n now = datetime.datetime.now()\n self._attrs['last_updated'] = now.strftime(\"%m/%d/%Y %H:%M:%S\")\n\n\n\n\n\n async def refresh(self) -> bool:\n \"\"\"Refresh ecobee tokens and update config entry.\"\"\"\n _LOGGER.debug(\"Refreshing ecobee tokens and updating config entry\")\n result = await self._hass.async_add_executor_job(self.ecobee.refresh_tokens)\n if result == True:\n self._hass.config_entries.async_update_entry(\n self._entry,\n data={\n CONF_API_KEY: self.ecobee.config[ECOBEE_API_KEY],\n CONF_REFRESH_TOKEN: self.ecobee.config[ECOBEE_REFRESH_TOKEN],\n },\n )\n return True\n _LOGGER.error(\"Error refreshing ecobee tokens\")\n return False", "sub_path": "custom_components/flo/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 5425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 35, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 36, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 37, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_USERNAME", "line_number": 37, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 38, 
"usage_type": "call"}, {"api_name": "homeassistant.const.CONF_PASSWORD", "line_number": 38, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 40, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 41, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 37, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 37, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 38, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 38, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.ensure_list", "line_number": 40, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 40, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 41, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 41, "usage_type": "name"}, {"api_name": "voluptuous.ALLOW_EXTRA", "line_number": 43, "usage_type": "attribute"}, {"api_name": "homeassistant.const.CONF_USERNAME", "line_number": 53, "usage_type": "argument"}, {"api_name": "homeassistant.const.CONF_PASSWORD", "line_number": 54, "usage_type": "argument"}, {"api_name": "pyflowater.PyFlo", "line_number": 57, "usage_type": "call"}, {"api_name": "requests.exceptions.ConnectTimeout", "line_number": 67, "usage_type": "name"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 67, "usage_type": "name"}, {"api_name": "homeassistant.helpers.discovery.load_platform", "line_number": 91, "usage_type": "call"}, {"api_name": "homeassistant.helpers.discovery", "line_number": 91, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity.Entity", "line_number": 97, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "attribute"}]} +{"seq_id": "49153410", "text": "import falcon\nfrom wtforms import Form, ValidationError\nfrom wtforms.ext.sqlalchemy.orm import model_form\n\nfrom ccm.appointments.models import Appointment\nfrom ccm.patients.models import Patient\n\nfrom ccm.settings import session\n\n\ndef validate_patient(form, field):\n patient = session.query(Patient).get(field.data)\n if not patient:\n raise ValidationError('Patient with id={id} not found'.format(id=field.data))\n\n\nAppointmentForm = model_form(\n Appointment,\n Form,\n field_args={\n 'patient_id': {\n 'validators': [\n validate_patient\n ]\n }\n },\n exclude_fk=False\n)\n\n\ndef validate_appointment_create(req, resp, resource, params):\n data = req.context.get('doc')\n appointment_form = AppointmentForm(data=data)\n\n if not appointment_form.validate():\n raise falcon.HTTPBadRequest(\n 'Form Validation Error',\n appointment_form.errors\n )\n", "sub_path": "ccm/appointments/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ccm.settings.session.query", "line_number": 12, "usage_type": "call"}, {"api_name": "ccm.patients.models.Patient", "line_number": 12, "usage_type": "argument"}, {"api_name": "ccm.settings.session", "line_number": 12, "usage_type": "name"}, {"api_name": "wtforms.ValidationError", "line_number": 14, "usage_type": "call"}, 
{"api_name": "wtforms.ext.sqlalchemy.orm.model_form", "line_number": 17, "usage_type": "call"}, {"api_name": "ccm.appointments.models.Appointment", "line_number": 18, "usage_type": "argument"}, {"api_name": "wtforms.Form", "line_number": 19, "usage_type": "argument"}, {"api_name": "falcon.HTTPBadRequest", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "320423893", "text": "import json\nimport numpy as np\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.preprocessing import LabelEncoder\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.cross_validation import train_test_split\n\n# Data preparation\nDATA_DIR = \"./data\"\nwith open(DATA_DIR + '/train.json', encoding='utf8') as data_file:\n train_data = json.load(data_file)\n \nX = [x['ingredients'] for x in train_data]\nX = [dict(zip(x, np.ones(len(x)))) for x in X]\n\nv = DictVectorizer()\nX = v.fit_transform(X)\nfeature_names = np.array(v.feature_names_)\n\nle = LabelEncoder()\ny = [y['cuisine'] for y in train_data]\ny = le.fit_transform(y).astype(np.int32)\nlabel_names = le.classes_\n\n# KNN\n\n# finding the best number of neighbors - 17\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.3,\n random_state=10)\n\n# n_neighbors = range(1, 51, 2)\n# scores = []\n# for n in n_neighbors:\n# clf = KNN(n)\n# clf.fit(X_train, y_train)\n# scores.append(clf.score(X_test, y_test))\n\n# plt.figure(figsize=(20, 8))\n# plt.plot(n_neighbors, scores, linewidth=3.0)\n# plt.show()\n# \nscores = []\nidx = np.random.uniform(0, 1, X.shape[0]) >= 0.3\nfor n in range(5):\n np.random.shuffle(idx)\n X_train, X_test = X[idx], X[idx == False]\n y_train, y_test = y[idx], y[idx == False]\n clf = KNN(17, weights='uniform')\n clf.fit(X_train, y_train)\n scores.append(clf.score(X_test, y_test))", "sub_path": "code/whats-cooking.py", "file_name": "whats-cooking.py", "file_ext": "py", "file_size_in_byte": 1632, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.DictVectorizer", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "590586549", "text": "from opengever.document.archival_file import ArchivalFileConverter\nfrom opengever.dossier.docprops import DocPropertyWriter\nfrom zope.lifecycleevent import IObjectRemovedEvent\n\n\nDISABLE_DOCPROPERTY_UPDATE_FLAG = 'disable_docproperty_update'\n\n\ndef checked_out(context, event):\n _update_docproperties(context)\n\n\ndef before_documend_checked_in(context, 
event):\n _update_docproperties(context)\n\n\ndef document_moved_or_added(context, event):\n if IObjectRemovedEvent.providedBy(event):\n return\n\n if context.REQUEST.get(DISABLE_DOCPROPERTY_UPDATE_FLAG):\n return\n\n _update_docproperties(context)\n\n\ndef _update_docproperties(document):\n DocPropertyWriter(document).update()\n\n\ndef set_archival_file_state(context, event):\n # Because every filewidget is always marked as changed, in the event\n # descriptions, even when no file has changed, we have to check the request\n request = context.REQUEST\n\n if request.get('ACTUAL_URL').endswith('edit_archival_file'):\n field_name = 'archival_file'\n else:\n field_name = 'IDocumentMetadata.archival_file'\n\n fileupload = request.get('form.widgets.{}'.format(field_name))\n action = request.get('form.widgets.{}.action'.format(field_name), '')\n\n if bool(fileupload):\n ArchivalFileConverter(context).handle_manual_file_upload()\n\n file_removed = action == u'remove'\n file_removed_in_archival_form = isinstance(action, list) and u'remove' in action\n\n if file_removed or file_removed_in_archival_form:\n ArchivalFileConverter(context).remove_state()\n", "sub_path": "opengever/document/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 1564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "zope.lifecycleevent.IObjectRemovedEvent.providedBy", "line_number": 18, "usage_type": "call"}, {"api_name": "zope.lifecycleevent.IObjectRemovedEvent", "line_number": 18, "usage_type": "name"}, {"api_name": "opengever.dossier.docprops.DocPropertyWriter", "line_number": 28, "usage_type": "call"}, {"api_name": "opengever.document.archival_file.ArchivalFileConverter", "line_number": 45, "usage_type": "call"}, {"api_name": "opengever.document.archival_file.ArchivalFileConverter", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "307120840", "text": "from django.conf.urls import url\nfrom taps_oan import views\n\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'about/$', views.about, name='about'),\n url(r'^add_pub/$', views.add_pub, name='add_pub'),\n url(r'^pub/(?P[\\w\\-]+)/$', \n views.show_pub, name='show_pub'),\n url(r'^pub/(?P[\\w\\-]+)/add_beer/$', \n views.add_beer, name='add_beer'),\n url(r'^beer/(?P[\\w\\-]+)/$', \n views.show_beer, name='show_beer'),\n url(r'^beer/(?P[\\w\\-]+)/add_carrier/$',\n views.add_carrier, name='add_carrier'),\n url(r'^register/$',\n views.register,name='register'),\n url(r'^login/$', \n views.user_login, name='login'),\n url(r'^logout/$', \n views.user_logout, name='logout'),\n url(r'^account/$',\n views.account, name='account'),\n url(r'^yelp/(?P[\\w\\-]+)/$', \n views.yelpLookUp, name='yelpLookUp'),\n\n]\n", "sub_path": "taps_oan/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "taps_oan.views.index", "line_number": 6, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "taps_oan.views.about", "line_number": 7, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, 
{"api_name": "taps_oan.views.add_pub", "line_number": 8, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "taps_oan.views.show_pub", "line_number": 10, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "taps_oan.views.add_beer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "taps_oan.views.show_beer", "line_number": 14, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "taps_oan.views.add_carrier", "line_number": 16, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "taps_oan.views.register", "line_number": 18, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "taps_oan.views.user_login", "line_number": 20, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "taps_oan.views.user_logout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "taps_oan.views.account", "line_number": 24, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "taps_oan.views.yelpLookUp", "line_number": 26, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "319954014", "text": "from flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom utils.image_diff import ImageDiff\nfrom utils.image_merge import Stitcher\nfrom utils.image_similar import HashSimilar\nfrom utils.image_text import get_text_roi\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/vision/diff', methods=[\"POST\"])\ndef vision_diff():\n data = {\n \"code\": 0,\n \"data\": ImageDiff().get_image_score(request.json['image1'], request.json['image2'],\n request.json['image_diff_name'])\n }\n return jsonify(data)\n\n\n@app.route('/vision/merge', methods=[\"POST\"])\ndef vision_merge():\n data = {\n \"code\": 0,\n \"data\": Stitcher(request.json['image_list']).image_merge(request.json['name'])\n }\n return jsonify(data)\n\n\n@app.route('/vision/similar', methods=[\"POST\"])\ndef vision_similar():\n data = {\n \"code\": 0,\n \"data\": HashSimilar().get_hash_similar(request.json['image1'], request.json['image2'])\n }\n return jsonify(data)\n\n\n@app.route('/vision/text', methods=[\"POST\"])\ndef vision_text():\n data = {\n \"code\": 0,\n \"data\": get_text_roi(request.json['image'])\n }\n return 
jsonify(data)\n\n\n@app.errorhandler(Exception)\ndef error(e):\n ret = dict()\n ret[\"code\"] = 1\n ret[\"data\"] = repr(e)\n return jsonify(ret)\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=9092)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.image_diff.ImageDiff", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.image_merge.Stitcher", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.image_similar.HashSimilar", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.image_text.get_text_roi", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "148894343", "text": "import pandas\nimport logging\n\nfrom osgeo import ogr\n\n\nlogging.basicConfig()\n\n\ndef load_polygons():\n driver = ogr.GetDriverByName('ESRI Shapefile')\n polyshp = driver.Open('gis/shp/neighborhoods.shp')\n polygon_layer = polyshp.GetLayer(0)\n return list(polygon_layer)\n\n\ndef get_point(x, y):\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x * 10000, y * 10000)\n return point\n\n\ndef classify_point(point, polygons):\n for polygon in polygons:\n if polygon.GetGeometryRef().Contains(point):\n return polygon.GetField('nhood')\n logging.info(\"No polygon found for {}\".format(str(point)))\n return \"Unknown\"\n\n\ndef classify_df(df):\n polygons = load_polygons()\n coords = zip(df.x.tolist(), df.y.tolist())\n df['classified_neighborhood'] = pandas.Series([classify_point(get_point(*coord), polygons) for coord in coords])\n", "sub_path": "data/geo_classifier.py", "file_name": "geo_classifier.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "osgeo.ogr.GetDriverByName", "line_number": 11, "usage_type": "call"}, {"api_name": "osgeo.ogr", "line_number": 11, "usage_type": "name"}, {"api_name": "osgeo.ogr.Geometry", "line_number": 18, "usage_type": "call"}, {"api_name": "osgeo.ogr", "line_number": 18, "usage_type": "name"}, {"api_name": 
"osgeo.ogr.wkbPoint", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "602649974", "text": "\"\"\"\nAuthors:\nDavid Saper - 302598032 dav_sap\nAlon Perelmuter - 20063088 alonperl\n\"\"\"\n\nfrom pox.core import core\nimport pox.openflow.libopenflow_01 as of\nimport time\nimport threading\nfrom pox.core import core\nimport pox.openflow.libopenflow_01 as of\nimport utils\nfrom pox.lib.packet.lldp import lldp, chassis_id, port_id, ttl, end_tlv\nfrom pox.lib.packet.ethernet import ethernet\nlog = core.getLogger()\ntutorial_list = []\n\nclass Tutorial (object):\n \"\"\"\n A Tutorial object is created for each switch that connects.\n A Connection object for that switch is passed to the __init__ function.\n \"\"\"\n\n def __init__ (self, connection):\n self.forward_table = {}\n self.connection = connection\n self.unauthorized_ports = []\n # self.discovery = Discovery()\n # Discovery.get_node(connection.dpid).connection = connection\n # This binds our PacketIn event listener\n connection.addListeners(self)\n def update_flow_table(self):\n \"\"\"\n This function goes over all ports found in unauthorized_port (port that are\n forbidden from graph because of the spanning tree.) and ask remove_rules_by_port\n to remove all the flows holding the forbidden ports.\n \"\"\"\n # log.debug('update flow table for switch {}'.format(self.connection.dpid))\n for port in self.unauthorized_ports:\n self.remove_rules_by_port(port)\n\n def _handle_PacketIn (self, event):\n \"\"\"\n Handles packet in messages from the switch.\n \"\"\"\n if event.parsed.type == ethernet.LLDP_TYPE:\n return\n packet = event.parsed # Packet is the original L2 packet sent by the switch\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n # Ignore IPv6 discovery messages\n if \"33:33:00:00:00:\" in str(packet.dst):\n return\n\n packet_in = event.ofp # packet_in is the OpenFlow packet sent by the switch\n\n self.act_like_switch(packet, packet_in)\n\n def send_packet (self, buffer_id, raw_data, out_port, in_port):\n \"\"\"\n Sends a packet out of the specified switch port.\n If buffer_id is a valid buffer on the switch, use that. Otherwise,\n send the raw data in raw_data.\n The \"in_port\" is the port number that packet arrived on. 
Use\n OFPP_NONE if you're generating this packet.\n \"\"\"\n # We tell the switch to take the packet with id buffer_if from in_port\n # and send it to out_port\n # If the switch did not specify a buffer_id, it must have specified\n # the raw data of the packet, so in this case we tell it to send\n # the raw data\n msg = of.ofp_packet_out()\n msg.in_port = in_port\n if buffer_id != -1 and buffer_id is not None:\n # We got a buffer ID from the switch; use that\n msg.buffer_id = buffer_id\n else:\n # No buffer ID from switch -- we got the raw data\n if raw_data is None:\n # No raw_data specified -- nothing to send!\n return\n msg.data = raw_data\n\n # Add an action to send to the specified port\n action = of.ofp_action_output(port = out_port)\n msg.actions.append(action)\n\n # Send message to switch\n self.connection.send(msg)\n\n def send_flow_mod(self, packet, packet_in, out_port):\n \"\"\"\n This function install a flow for a specific request to the switch\n \"\"\"\n log.debug(\"Installing new flow rule on SW: {}; in_port: {}; dl_src: {}; dl_dst: {}\".format(self.connection.dpid, packet_in.in_port, packet.src,packet.dst))\n fm = of.ofp_flow_mod()\n fm.match.in_port = packet_in.in_port\n fm.match.dl_dst = packet.dst\n fm.match.dl_src = packet.src\n # it is not mandatory to set fm.data or fm.buffer_id\n if packet_in.buffer_id != -1 and packet_in.buffer_id is not None:\n # Valid buffer ID was sent from switch, we do not need to encapsulate raw data in response\n fm.buffer_id = packet_in.buffer_id\n else:\n if packet_in.data is not None:\n # No valid buffer ID was sent but raw data exists, send raw data with flow_mod\n fm.data = packet_in.data\n else:\n return\n action = of.ofp_action_output(port=out_port)\n fm.actions.append(action)\n\n # Send message to switch\n self.connection.send(fm)\n\n\n\n\n def act_like_switch(self, packet, packet_in):\n \"\"\"\n This function manage the forwarding table of the switch.\n the func. 
gets the packet the the switch passed to the controller,\n and decide whether to ignore the packet or install a new flow to the switch,\n or just flood the packet.\n \"\"\"\n if packet_in.in_port in self.unauthorized_ports:\n log.debug(\"SW \" + str(self.connection.dpid) + \" got packet from unauthorized port \" + str(packet_in.in_port))\n return\n if packet.src in self.forward_table and packet_in.in_port != self.forward_table[packet.src]:\n self.remove_flow(packet.src)\n self.forward_table[packet.src] = packet_in.in_port\n if packet.dst in self.forward_table:\n self.send_flow_mod(packet, packet_in, self.forward_table[packet.dst])\n else:\n ####FLOODING packet\n ports_list = self.connection.features.ports\n log.debug('SW:' + str(\n self.connection.dpid) + '; Flooding packet: dest = {}; src = {}; from in_port = {}; to all ports except '\n 'unauthorized = {}'.format(packet.dst, packet.src, packet_in.in_port, self.unauthorized_ports))\n for port in ports_list:\n if port.port_no not in self.unauthorized_ports and \\\n port.port_no < of.OFPP_MAX and \\\n port.port_no != packet_in.in_port:\n self.send_packet(None, packet_in.data, port.port_no, packet_in.in_port)\n\n def remove_flow(self, source):\n \"\"\"\n This function removes a flow from the switch by source mac address.\n It helps while links are turn off\n \"\"\"\n log.debug('Remove flow from SW: {} ; dl_dest = {}'.format(self.connection.dpid, source))\n fm = of.ofp_flow_mod()\n fm.command = of.OFPFC_DELETE\n # fm.match.dl_dst = source # change this if necessary\n fm.match.dl_dst = source # change this if necessary\n self.connection.send(fm) # send flow-mod message\n\n def remove_rules_by_port(self, port):\n \"\"\"\n This function removes a flow from the switch according to a given port number.\n It helps while removing edged in the graph.\n it will clean all the flows in the switch that connected to the port\n \"\"\"\n log.debug(\"Remove flow from SW: {} out_port:{}\".format(self.connection.dpid,port))\n msg = of.ofp_flow_mod(command=of.OFPFC_DELETE, out_port=port)\n self.connection.send(msg)\n mac_to_remove = []\n for mac, in_port in self.forward_table.iteritems():\n if in_port == port:\n mac_to_remove.append(mac)\n fm = of.ofp_flow_mod()\n fm.command = of.OFPFC_DELETE\n fm.match.dl_src = mac\n self.connection.send(fm)\n for mac in mac_to_remove:\n del self.forward_table[mac]\n\n\nclass Discovery(object):\n __metaclass__ = utils.SingletonType\n LLDP_INTERVAL = 1\n TIME_TO_REMOVE = 6\n LLDP_DST_ADDR = '\\x01\\x80\\xc2\\x00\\x00\\x0e'\n def __init__(self):\n core.openflow.addListeners(self)\n self.topology = utils.Graph()\n self.edge_timer = utils.Timer(3,self.run_edges,recurring=True)\n self.lock = threading.Lock()\n self.sub_tree = []\n\n\n\n def is_port_active(self, node, port):\n \"\"\"\"\n This function gets a Node and a port number, It will go over the sub_tree,\n (sub_tree is the kruskal minimum spanning tree) and will return if the given\n port is a forbidden port.\n :return:\n True : if link is active\n False: if link is forbidden\n \"\"\"\n for edge in self.sub_tree:\n if node in edge:\n if self.topology.nodes[node][port][0] in edge:\n return True\n return False\n def _handle_ConnectionUp(self, event):\n \"\"\"\"\n Will be called when a switch is added. Use event.dpid for switch ID,\n and event.connection.send(...) 
to send messages to the switch.\n \"\"\"\n timer = utils.Timer(Discovery.LLDP_INTERVAL,self._send_lldp,args=[event],recurring=True)\n log.debug(\"New switch ConnectionUp dpid: {}\".format(event.dpid))\n self.lock.acquire()\n node = utils.Node(event.dpid)\n self.set_tutorial(node, event.connection)\n\n self.topology.add_node(node, {})\n self.lock.release()\n #send flow to the switch to pass every lldp packet to the controller\n fm = of.ofp_flow_mod()\n fm.match.dl_type = ethernet.LLDP_TYPE\n fm.match.dl_dst = self.LLDP_DST_ADDR\n # it is not mandatory to set fm.data or fm.buffer_id\n action = of.ofp_action_output(port=of.OFPP_CONTROLLER)\n fm.actions.append(action)\n # Send flow to the switch\n event.connection.send(fm)\n\n @staticmethod\n def set_tutorial(node, connection):\n \"\"\"\"\n connect given node to his real Tutorial.\n \"\"\"\n for tuto in tutorial_list:\n if tuto.connection == connection:\n node.tutorial = tuto\n return True\n return False\n def _handle_ConnectionDown(self, event):\n \"\"\"\"\n Will be called when a switch goes down. Use event.dpid for switch ID.\n \"\"\"\n log.debug(\"_handle_ConnectionDown: dpid {}\".format(event.dpid))\n self.lock.acquire()\n node = self.get_node(event.dpid)\n far_ends = []\n for port, port_data in self.topology.nodes[node].iteritems():\n far_ends.append(port_data[0])\n for far in far_ends:\n self.remove_edge((node, far))\n self.topology.remove_node(node)\n self.Kruskal_Mst()\n self.lock.release()\n\n def _handle_PortStatus(self, event):\n \"\"\"\"\n Will be called when a link changes. Specifically, when event.ofp.desc.config is 1,\n it means that the link is down. Use event.dpid for switch ID and event.port for port number.\n \"\"\"\n log.debug(\"_handle_PortStatus: SW {} port{}; status {}\".format(event.dpid, event.port, event.ofp.desc.config))\n if event.ofp.desc.config == 1:\n #port is down\n self.lock.acquire()\n node = self.get_node(event.dpid)\n if event.port in self.topology.nodes[node]:\n far_node = self.topology.nodes[node][event.port][0]\n edge = (node, far_node)\n self.remove_edge(edge)\n log.debug(\"Removed edge (sw{})<>(sw{}); Reason: ports are down\".format(str(node),str(far_node)))\n # log.debug(str(far_node) +self.ports_dict_to_string(self.topology.nodes[far_node]))\n self.Kruskal_Mst()\n # else:\n # log.debug(\"Trying to remove a not active edge : Switch {} port{}\".format(event.dpid, event.port))\n self.lock.release()\n\n\n\n\n def _handle_PacketIn(self, event):\n \"\"\"\"\n Will be called when a packet is sent to the controller. 
Same as in the previous part.\n Use it to find LLDP packets (event.parsed.type == ethernet.LLDP_TYPE) and update\n the topology according to them.\n \"\"\"\n if event.parsed.type != ethernet.LLDP_TYPE:\n return\n\n pkt = event.parsed\n lldp_p = pkt.payload\n ch_id = lldp_p.tlvs[0]\n po_id = lldp_p.tlvs[1]\n\n r_dpid = int(ch_id.id)\n r_port = int(po_id.id)\n # log.debug(\"Discovery _handle_PacketIn to dpid {} from Sw{}port{}\".format(event.dpid, r_dpid, r_port))\n self.lock.acquire()\n node = self.get_node(event.dpid)\n far_node = self.get_node(r_dpid)\n if self.topology.get_edge(node, far_node):\n self.topology.update_edge(node, far_node, time.time())\n else:\n log.debug(\"Discovered new edge: (sw: \" + str(node) + \"; port: \" + str(event.port) + \") <> (sw: \" + str(\n r_dpid) + \"; port: \" + str(r_port) + \")\")\n self.topology.add_edge(node,far_node,time.time())\n self.topology.nodes[node][event.port] = (far_node,r_port)\n self.topology.nodes[far_node][r_port] = (node, event.port)\n self.Kruskal_Mst()\n self.lock.release()\n\n def ports_dict_to_string(self,ports):\n \"\"\"\"\n This function gets a dictionary of ports and return a string of all the nodes.\n raised for log reasons.\n \"\"\"\n str_ports = ''\n for port,far in ports.iteritems():\n str_ports += \"p:\" + str(port) + \" far_node:\"+str(far[0]) + \" far_port:\"+str(far[1])\n return str_ports\n\n def _send_lldp(self, event ):\n \"\"\"\"\n \"\"\"\"\"\n # log.debug('Flooding packet : dest = {} src = {} in_port = {}'.format(packet.dst, packet.src, packet_in.in_port))\n # self.send_packet(packet_in.buffer_id, packet_in.data, of.OFPP_FLOOD, packet_in.in_port)\n # log.debug(\"send lldp sw : {}\".format(event.dpid))\n dst = Discovery.LLDP_DST_ADDR\t\t# == '\\x01\\x80\\xc2\\x00\\x00\\x0e'\n\n for p in event.ofp.ports:\n if p.port_no < of.OFPP_MAX:\n # Build LLDP packet\n src = str(p.hw_addr)\n port = p.port_no\n\n lldp_p = lldp() # create LLDP payload\n ch_id = chassis_id() # Add switch ID part\n ch_id.subtype = 1\n ch_id.id = str(event.dpid)\n lldp_p.add_tlv(ch_id)\n po_id = port_id() # Add port ID part\n po_id.subtype = 2\n po_id.id = str(port)\n lldp_p.add_tlv(po_id)\n tt = ttl() # Add TTL\n tt.ttl = Discovery.LLDP_INTERVAL # == 1\n lldp_p.add_tlv(tt)\n lldp_p.add_tlv(end_tlv())\n\n ether = ethernet() # Create an Ethernet packet\n ether.type = ethernet.LLDP_TYPE # Set its type to LLDP\n ether.src = src # Set src, dst\n ether.dst = dst\n ether.payload = lldp_p # Set payload to be the LLDP payload\n\n # send LLDP packet\n pkt = of.ofp_packet_out(action = of.ofp_action_output(port = port))\n pkt.data = ether\n event.connection.send(pkt)\n def run_edges(self):\n \"\"\"\"\n scan timestamps of all edges. If an edge was not seen for more than 6 seconds, remove it from the topology.\n \"\"\"\n self.lock.acquire()\n edges_to_remove = []\n for edge,data in self.topology.edges.iteritems():\n if time.time()-data > Discovery.TIME_TO_REMOVE:\n log.debug(\"Removed edge (sw{}<>sw{}); Reason: LLDP not arrived for long time. timeout. 
: \"\n \"\".format(edge[0].dpid,edge[1].dpid) )\n edges_to_remove += [edge]\n if edges_to_remove:\n for e in edges_to_remove:\n self.remove_edge(e)\n self.Kruskal_Mst()\n\n self.lock.release()\n\n def remove_edge(self, edge):\n # del information from nodes\n port0 = -1\n port1 = -1\n log.debug(\"remove edge ({},{})\".format(edge[0],edge[1]))\n for port, port_data in self.topology.nodes[edge[0]].iteritems():\n if port_data[0] == edge[1]:\n log.debug(\"empty the ports \" + str(port) + str(port_data[0].dpid))\n port0 = port\n port1 = port_data[1]\n del self.topology.nodes[edge[0]][port0]\n edge[0].tutorial.remove_rules_by_port(port0)\n del self.topology.nodes[edge[1]][port1]\n edge[1].tutorial.remove_rules_by_port(port1)\n # remove edge\n self.topology.delete_edge(edge[0], edge[1])\n def get_node(self, dpid):\n for node in self.topology.nodes:\n if node.dpid == dpid:\n return node\n\n def Kruskal_Mst(self):\n \"\"\"\"\n This function calculate the minimum spanning tree by kruskal algorithm\n it will update the self.sub_tree with his decision.\n It also calls for update_unauthorized_ports that will update all the nodes\n of the graph by the MST demands.\n \"\"\"\n self.sub_tree = []\n uf = utils.UnionFind()\n for v in self.topology.nodes:\n uf.make_set(v)\n for edge in self.topology.edges:\n if uf.find(edge[0]) != uf.find(edge[1]):\n self.sub_tree.append((edge[0],edge[1]))\n uf.union(edge[0],edge[1])\n log.debug(\"Kruskal full graph: {}\".format(self.edges_to_str(self.topology.edges)))\n log.debug(\"Kruskal MST: {} [these are the active links]\".format(self.edges_to_str(self.sub_tree)))\n self.update_unauthorized_ports()\n\n def edges_to_str(self,edges):\n \"\"\"\"\n :return: string of all the edges\n \"\"\"\n str_to_print = ''\n for edge in edges:\n str_to_print += '(' + str(edge[0]) + \",\" + str(edge[1]) + \") \"\n return str_to_print\n\n def update_unauthorized_ports(self):\n \"\"\"\"\n This Function will go over all the nodes and will update all the unauthorized ports\n it will call updae_flow_table of each node that will update his flow table.\n \"\"\"\n for node, ports in self.topology.nodes.iteritems():\n node.tutorial.unauthorized_ports = []\n for port in ports:\n if not self.is_port_active(node, port):\n node.tutorial.unauthorized_ports.append(port)\n log.debug(\"sw: \" + str(node) + \"; unauthorized ports by ST are: \" + str(node.tutorial.unauthorized_ports))\n node.tutorial.update_flow_table()\n\ndef launch ():\n \"\"\"\n Starts the component\n \"\"\"\n def start_switch (event):\n log.debug(\"Controlling %s\" % (event.connection,))\n t = Tutorial(event.connection)\n tutorial_list.append(t)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)\n core.register('discovery', Discovery())\n\n", "sub_path": "of_learning_switch_spanning_tree.py", "file_name": "of_learning_switch_spanning_tree.py", "file_ext": "py", "file_size_in_byte": 18488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pox.core.core.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "pox.core.core", "line_number": 16, "usage_type": "name"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 47, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_packet_out", "line_number": 75, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 75, "usage_type": 
"name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 88, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 88, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 99, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 99, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 113, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 113, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPP_MAX", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 145, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 155, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 155, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPFC_DELETE", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 156, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 168, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 168, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPFC_DELETE", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 174, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 174, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPFC_DELETE", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.SingletonType", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pox.core.core.openflow.addListeners", "line_number": 188, "usage_type": "call"}, {"api_name": "pox.core.core.openflow", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pox.core.core", "line_number": 188, "usage_type": "name"}, {"api_name": "utils.Graph", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.Timer", "line_number": 190, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 191, "usage_type": "call"}, {"api_name": "utils.Timer", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.Node", "line_number": 218, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 224, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 224, "usage_type": "name"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 225, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 228, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 228, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPP_CONTROLLER", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 289, "usage_type": "name"}, {"api_name": "time.time", "line_number": 304, "usage_type": "call"}, {"api_name": "time.time", "line_number": 308, "usage_type": "call"}, {"api_name": 
"pox.openflow.libopenflow_01.OFPP_MAX", "line_number": 333, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 333, "usage_type": "name"}, {"api_name": "pox.lib.packet.lldp.lldp", "line_number": 338, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.chassis_id", "line_number": 339, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.port_id", "line_number": 343, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.ttl", "line_number": 347, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.end_tlv", "line_number": 350, "usage_type": "call"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 352, "usage_type": "call"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 353, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 353, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_packet_out", "line_number": 359, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 359, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 359, "usage_type": "call"}, {"api_name": "time.time", "line_number": 369, "usage_type": "call"}, {"api_name": "utils.UnionFind", "line_number": 409, "usage_type": "call"}, {"api_name": "pox.core.core.openflow.addListenerByName", "line_number": 450, "usage_type": "call"}, {"api_name": "pox.core.core.openflow", "line_number": 450, "usage_type": "attribute"}, {"api_name": "pox.core.core", "line_number": 450, "usage_type": "name"}, {"api_name": "pox.core.core.register", "line_number": 451, "usage_type": "call"}, {"api_name": "pox.core.core", "line_number": 451, "usage_type": "name"}]} +{"seq_id": "606320313", "text": "from datetime import datetime\nfrom typing import List\nfrom enum import Enum\nfrom pprint import pprint\nfrom acaisdk.utils.utils import bytes_to_size\n\n\nclass Alignment(Enum):\n LEFT = '{{:{}}}'\n RIGHT = '{{:>{}}}'\n\n\nclass PrettyPrint:\n @staticmethod\n def single_col(data: List, lexi_sort=False):\n if lexi_sort:\n data = sorted(data)\n for l in data:\n print(l)\n\n @staticmethod\n def list_with_meta(file_set, file_ids: List[str], files_meta: List,\n human_readable_size=True):\n \"\"\"\n :param file_set:\n :param human_readable_size:\n :param file_ids:\n :param files_meta: expects a list of meta dicts, one dict per file\n :return:\n \"\"\"\n sorted_file_ids = PrettyPrint.sort_by_type(file_ids)\n id_to_meta = {d['_id']: d for d in files_meta} # type: dict\n\n # Columns: FilePath:Version, size, createdBy, createdAt\n cols = [\n ['[{}]'.format(file_set) if file_set else '[/]',\n 'size',\n 'user',\n 'created']\n ]\n align = [Alignment.LEFT,\n Alignment.RIGHT,\n Alignment.RIGHT,\n Alignment.RIGHT]\n\n # Maybe some file_ids does not have meta\n for fid in sorted_file_ids:\n if fid in id_to_meta:\n size = str(id_to_meta[fid]['__size__'])\n if human_readable_size:\n size = bytes_to_size(int(size))\n uid = str(id_to_meta[fid]['__creator_id__'])\n created_at = id_to_meta[fid]['__create_time__'] // 1000\n ts = datetime \\\n .utcfromtimestamp(created_at) \\\n .strftime('%Y-%m-%d %H:%M:%S')\n cols.append([fid, size, uid, ts])\n else:\n cols.append([fid, '-', '-', '-'])\n\n PrettyPrint.aligned_print(cols, align)\n\n @staticmethod\n def aligned_print(rows: List[List[str]],\n alignment: List[Alignment]):\n # Loop and set max column width\n max_col_width = [0, 0, 0, 0]\n for row in rows:\n for i, c in enumerate(row):\n max_col_width[i] = 
max(max_col_width[i], len(c))\n # Print\n template = ' '.join([alignment[i].value.format(w)\n for i, w in enumerate(max_col_width)])\n for r in rows:\n print(template.format(*r))\n\n @staticmethod\n def job(j):\n print('Registered job id: {}'.format(j.id))\n pprint(dict(j.dict))\n\n @staticmethod\n def sort_by_type(file_ids):\n dirs = [f for f in file_ids if f.endswith('/')]\n files = [f for f in file_ids if not f.endswith('/')]\n return sorted(dirs) + sorted(files)\n\n @staticmethod\n def print(content):\n pprint(content)\n", "sub_path": "acaicli/prettyprint.py", "file_name": "prettyprint.py", "file_ext": "py", "file_size_in_byte": 2855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "enum.Enum", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "acaisdk.utils.utils.bytes_to_size", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 80, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "243876995", "text": "import tablib\n\n\ndefault_config = {}\n\n\nclass ScrubError(Exception):\n pass\n\n\nclass Report(tablib.Dataset):\n\n def __init__(self, headers, *args, **kwargs):\n\n self.config = kwargs.pop('config', default_config.copy())\n # we don't want to set self.headers until our dataset is ready\n self._headers = headers\n self.obj_list = args\n\n dataset_data = list(self.obj_data_generator())\n self.tablib_init(*dataset_data, headers=self._headers, **kwargs)\n\n @classmethod\n def register_new_formats(cls, formats):\n for fmt in formats:\n def _import(self):\n return fmt.import_set(self)\n\n def export(self):\n return fmt.export_set(self)\n\n setattr(cls, fmt.title, property(export, _import))\n\n def tablib_init(self, *args, **kwargs):\n return super(Report, self).__init__(*args, **kwargs)\n\n def default_scrub(self, obj, header):\n\n if hasattr(obj, 'keys'):\n try:\n return obj[header]\n except KeyError:\n if self.config.get('raise_on_scrub_failure', False):\n raise ScrubError(\"mapping object has no value for `%s`\" \\\n % header)\n else:\n return None\n\n try:\n scrubbed_value = getattr(obj, header)\n except AttributeError:\n if self.config.get('raise_on_scrub_failure', False):\n raise ScrubError(\"obj has no attribute %s\" % header)\n else:\n return None\n\n return scrubbed_value\n\n def scrub(self, obj):\n \"\"\"Turn report_data into a dict of cleaned data\"\"\"\n return {}\n\n def obj_data_generator(self):\n \"\"\"Turn Report data into a proper tablib Dataset\n\n #) Get list of headers that scrub will handle\n #) for other fields, attempt default_scrub method\n #) by default, if default_scrub fails, use None. 
However\n if self.config['raise_on_scrub_failure'] is True, ScrubError will be raised\n instead\n \"\"\"\n\n for obj in self.obj_list:\n scrubbed_data = self.scrub(obj)\n\n def get_value(header, obj):\n\n # we can't use dict.get's default parameter as it will run\n # default_scrub even if the value is in the scrub dict\n try:\n value = scrubbed_data[header]\n except KeyError:\n value = self.default_scrub(obj, header)\n return value\n\n values = [\n get_value(header, obj)\n for header in self._headers\n ]\n\n yield values\n", "sub_path": "bly/reports.py", "file_name": "reports.py", "file_ext": "py", "file_size_in_byte": 2729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tablib.Dataset", "line_number": 11, "usage_type": "attribute"}]}
+{"seq_id": "585620974", "text": "from collections import defaultdict\nfrom typing import List, OrderedDict\n\n\nclass Solution:\n def fullBloomFlowers(self, flowers: List[List[int]], persons: List[int]) -> List[int]:\n states = defaultdict(int)\n for flower in flowers:\n start, end = flower\n end = end + 1\n if start not in states:\n states[start] = 1\n else:\n states[start] += 1\n if end not in states:\n states[end] = -1\n else:\n states[end] -= 1\n states = list(states.items())\n states.sort(key=lambda x: x[0])\n for i in range(1, len(states)):\n states[i] = (states[i][0], states[i][1] + states[i - 1][1])\n print(states)\n res = []\n print(states)\n from bisect import bisect\n for time in persons:\n pos = bisect(states, time, key=lambda x:x[0])\n # NOTE: the two conditions below were garbled in the source (text between '<' and '>' was lost);\n # they are reconstructed here from the surviving branch structure\n if pos < len(states):\n if states[pos][0] > time:\n res.append(states[pos-1][1])\n else:\n res.append(states[pos][1])\n else:\n res.append(0)\n return res\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.fullBloomFlowers([[36,39],[29,49],[32,35],[14,43],[42,49],[48,48],[32,46],[6,41],[14,19]],\n[14,4]))", "sub_path": "2251.py", "file_name": "2251.py", "file_ext": "py", "file_size_in_byte": 1364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "bisect.bisect", "line_number": 28, "usage_type": "call"}, {"api_name": "{'bisect': 'bisect.bisect'}", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "219494275", "text": "from datetime import date, timedelta\n\nimport pytest\nfrom pytest import fixture\nfrom utilities.models.data_models import (\n Service,\n set_empty_list,\n set_service_regions,\n set_service_vehicle,\n)\n\n\n@pytest.mark.model\n@pytest.mark.unit\nclass TestService:\n \"\"\"Battery of tests for Service data model functionality.\"\"\"\n\n @pytest.fixture\n def valid_service(self) -> Service:\n \"\"\"Create a valid Service.\"\"\"\n service: Service = Service(name='Testing Service')\n\n yield service\n\n @pytest.mark.low\n def test_build__override_default_values(self, valid_service: fixture) -> None:\n \"\"\"Check that default values may be overridden post-build.\"\"\"\n service: Service = valid_service\n service.recurring_rides_enabled = True\n\n assert service.recurring_rides_enabled is True\n\n @pytest.mark.low\n def test_build__requires_name_param(self) -> None:\n \"\"\"Attempt to build a new service without entering params.\"\"\"\n with pytest.raises(TypeError) as e:\n Service() # type: ignore\n assert \"required positional argument: 'name'\" in str(e.value)\n\n @pytest.mark.low\n def test_build__set_default_values(self, 
valid_service: fixture) -> None:\n \"\"\"Check that building a service sets default values.\"\"\"\n service: Service = valid_service\n\n assert (\n service.friday is True\n and service.monday is True\n and service.saturday is True\n and service.sunday is True\n and service.thursday is True\n and service.tuesday is True\n and service.wednesday is True\n and service.color == '1e88e5'\n and service.end_time == 86340\n and service.fare_required is False\n and service.in_advance_enabled is False\n and service.managed_mode is False\n and service.max_capacity == 10\n and service.on_demand_enabled is True\n and service.recurring_rides_enabled is False\n and service.rider_restricted is False\n and service.shibboleth_restricted is False\n and service.start_time == 0\n and service.stop_restriction == 'unrestricted'\n and service.wheelchair_accessible is True\n )\n\n @pytest.mark.low\n def test_build__set_empty_fields(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets empty lists.\"\"\"\n service: Service = valid_service\n\n assert service.addresses == set_empty_list() and service.exceptions == set_empty_list()\n\n @pytest.mark.low\n def test_build__set_end_date(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets an end date 10 days in the future.\"\"\"\n service: Service = valid_service\n\n assert date.isoformat(date.today() + timedelta(days=10)) in service.end_date\n\n @pytest.mark.low\n def test_build__set_none_values(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets None values.\"\"\"\n service: Service = valid_service\n\n assert (\n service.service_id is None\n and service.fare_price is None\n and service.max_schedule_time is None\n and service.shibboleth_affiliation is None\n and service.token_transit_fare_id is None\n )\n\n @pytest.mark.low\n def test_build__set_regions(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets a region.\"\"\"\n service: Service = valid_service\n\n assert service.regions == set_service_regions()\n\n @pytest.mark.low\n def test_build__set_start_date(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets a start date 1 day in the past.\"\"\"\n service: Service = valid_service\n\n assert date.isoformat(date.today() - timedelta(days=1)) in service.start_date\n\n @pytest.mark.low\n def test_build__set_vehicles(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets a vehicle.\"\"\"\n service: Service = valid_service\n\n assert service.vehicles == set_service_vehicle()\n\n @pytest.mark.low\n def test_build__valid_input(self, valid_service: fixture) -> None:\n \"\"\"Build a service with valid input.\"\"\"\n service: Service = valid_service\n\n assert service.name == 'Testing Service'\n\n @pytest.mark.low\n def test_model__override_default_values(self) -> None:\n \"\"\"Check that default values may be overridden prior to build.\"\"\"\n service: Service = Service(name='Testing Service', recurring_rides_enabled=True)\n\n assert service.recurring_rides_enabled is True\n", "sub_path": "integration/models/test_service.py", "file_name": "test_service.py", "file_ext": "py", "file_size_in_byte": 4689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "utilities.models.data_models.Service", "line_number": 21, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "attribute"}, {"api_name": 
"utilities.models.data_models.Service", "line_number": 19, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 28, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 36, "usage_type": "call"}, {"api_name": "utilities.models.data_models.Service", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 41, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 43, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 69, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 71, "usage_type": "name"}, {"api_name": "utilities.models.data_models.set_empty_list", "line_number": 73, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 76, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 78, "usage_type": "name"}, {"api_name": "datetime.date.isoformat", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 83, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 85, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 96, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 98, "usage_type": "name"}, {"api_name": "utilities.models.data_models.set_service_regions", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 103, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.date.isoformat", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 110, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 112, "usage_type": "name"}, {"api_name": "utilities.models.data_models.set_service_vehicle", "line_number": 114, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 117, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 119, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 116, "usage_type": "attribute"}, {"api_name": "utilities.models.data_models.Service", "line_number": 126, "usage_type": 
"name"}, {"api_name": "pytest.mark", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "520166508", "text": "# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tempfile\nimport os\n\nimport tensorflow as tf\nimport numpy as np\nimport datetime\n\nfrom tensorflow import keras\n\n# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 to 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\ntrain_images = tf.expand_dims(train_images, -1)\ntest_images = tf.expand_dims(test_images, -1)\n\nfrom tensorflow_model_optimization.quantization.keras.vitis.layers import vitis_activation\n\n# Define the model architecture.\ninputs = keras.layers.Input(shape=(28, 28, 1))\nx = keras.layers.Conv2D(\n filters=32, kernel_size=(3, 3), use_bias=True, activation='linear')(\n inputs)\nx = keras.layers.BatchNormalization()(x)\nx = keras.layers.Activation('relu')(x)\nx = keras.layers.DepthwiseConv2D(\n kernel_size=(3, 3), use_bias=True, activation='linear')(\n x)\nx = keras.layers.BatchNormalization()(x)\nx = keras.layers.Activation('gelu')(x)\nx = keras.layers.Flatten()(x)\nx = keras.layers.Dropout(rate=0.1)(x)\nx = keras.layers.Dense(10)(x)\npredictions = x\nmodel = keras.Model(inputs=inputs, outputs=predictions, name=\"mnist_model\")\n\n# Train the float model\nmodel.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['sparse_categorical_accuracy'])\n\nlog_dir = \"logs/float_fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, histogram_freq=1)\nmodel.fit(\n train_images,\n train_labels,\n epochs=1,\n validation_data=(test_images, test_labels))\n\nmodel.save('float.h5')\nmodel.evaluate(test_images, test_labels, batch_size=500)\n\n# Inspect the float model\nfrom tensorflow_model_optimization.quantization.keras import vitis_inspect\n\n# `target` is the target DPU to deploy this model, it can be a name(e.g. \"DPUCZDX8G_ISA1_B4096\"),\n# a json(e.g. \"./U50/arch.json\") or a fingerprint.\ninspector = vitis_inspect.VitisInspector(target='DPUCZDX8G_ISA1_B4096')\n\n# In this model only `gelu` layer is not supported by DPU target.\n# Inspect results will be shown on screen, and more detailed results will be saved in\n# 'inspect_results.txt'. 
We can also visualize the results in 'model.svg'.\ninspector.inspect_model(\n model,\n input_shape=[1, 28, 28, 1],\n dump_model=True,\n dump_model_file='inspect_model.h5',\n plot=True,\n plot_file='model.svg',\n dump_results=True,\n dump_results_file='inspect_results.txt',\n verbose=0)\n", "sub_path": "examples/vai_quantizer/tensorflow2x/mnist_cnn_inspect.py", "file_name": "mnist_cnn_inspect.py", "file_ext": "py", "file_size_in_byte": 3233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.keras.datasets", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 25, "usage_type": "name"}, {"api_name": "tensorflow.expand_dims", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 37, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 38, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 42, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.DepthwiseConv2D", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 47, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 48, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 49, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.keras.Model", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.keras", 
"line_number": 52, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 57, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.TensorBoard", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_inspect.VitisInspector", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_inspect", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "624795403", "text": "import facepy\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom . facepy_wrapper import utils\n\nGRAPH_MAX_TRIES = 3\nFACEBOOK_TIMEOUT = getattr(settings, 'FACEBOOK_AUTH_BACKEND_FACEBOOK_TIMEOUT',\n timezone.timedelta(seconds=20).total_seconds())\nFACEBOOK_API_VERSION = getattr(settings, 'FACEBOOK_API_VERSION', '2.1')\n\n\ndef get_from_graph_api(graphAPI, query):\n for i in range(GRAPH_MAX_TRIES):\n try:\n return graphAPI.get(query)\n except facepy.FacepyError as e:\n if i == GRAPH_MAX_TRIES - 1 or getattr(e, 'code', None) != 1:\n raise\n\n\ndef get_application_graph(version=None):\n version = version or FACEBOOK_API_VERSION\n token = (facepy.utils\n .get_application_access_token(settings.FACEBOOK_APP_ID,\n settings.FACEBOOK_APP_SECRET,\n api_version=version))\n return get_graph(token)\n\n\ndef get_graph(*args, **kwargs):\n version = FACEBOOK_API_VERSION\n return utils.get_graph(*args, version=version, timeout=FACEBOOK_TIMEOUT, **kwargs)\n\n\ndef get_long_lived_access_token(access_token):\n return utils.get_long_lived_access_token(\n access_token=access_token,\n client_id=settings.FACEBOOK_APP_ID,\n client_secret=settings.FACEBOOK_APP_SECRET,\n )\n\n\ndef get_access_token(code=None, redirect_uri=None):\n return utils.get_access_token(\n code=code,\n redirect_uri=redirect_uri,\n client_id=settings.FACEBOOK_APP_ID,\n client_secret=settings.FACEBOOK_APP_SECRET,\n timeout=FACEBOOK_TIMEOUT,\n )\n", "sub_path": "facebook_auth/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.settings", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 9, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "argument"}, {"api_name": "facepy.FacepyError", "line_number": 17, "usage_type": "attribute"}, {"api_name": "facepy.utils.get_application_access_token", "line_number": 24, "usage_type": "call"}, {"api_name": "facepy.utils", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings.FACEBOOK_APP_ID", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_SECRET", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 26, "usage_type": "name"}, {"api_name": "facepy_wrapper.utils.get_graph", "line_number": 33, 
"usage_type": "call"}, {"api_name": "facepy_wrapper.utils", "line_number": 33, "usage_type": "name"}, {"api_name": "facepy_wrapper.utils.get_long_lived_access_token", "line_number": 37, "usage_type": "call"}, {"api_name": "facepy_wrapper.utils", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_ID", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_SECRET", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "facepy_wrapper.utils.get_access_token", "line_number": 45, "usage_type": "call"}, {"api_name": "facepy_wrapper.utils", "line_number": 45, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_ID", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_SECRET", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "434905892", "text": "from collections import OrderedDict\nfrom dataclasses import dataclass, field\nfrom enum import Enum, auto\nfrom typing import Dict, List, Optional\n\nfrom sqlalchemy.sql.schema import Column, ForeignKeyConstraint, Table\n\n\n@dataclass\nclass Model:\n name: str\n table: Table\n\n @property\n def schema(self) -> str:\n return self.table.schema\n\n\n@dataclass\nclass ModelClass(Model):\n columns: Dict[str, Column] = field(default_factory=OrderedDict)\n relationships: Dict[str, 'Relationship'] = field(default_factory=OrderedDict)\n parent_class: Optional['ModelClass'] = None\n children: List['ModelClass'] = field(default_factory=list)\n\n\nclass RelationshipType(Enum):\n ONE_TO_ONE = auto()\n ONE_TO_MANY = auto()\n MANY_TO_ONE = auto()\n MANY_TO_MANY = auto()\n\n\n@dataclass\nclass Relationship:\n type: RelationshipType\n source: ModelClass\n target: ModelClass\n constraint: Optional[ForeignKeyConstraint] = None\n association_table: Optional[Model] = None\n backref: Optional[str] = None\n remote_side: List[str] = field(default_factory=list)\n foreign_keys: List[str] = field(default_factory=list)\n primaryjoin: Optional[str] = None\n secondaryjoin: Optional[str] = None\n", "sub_path": "src/sqlacodegen/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sqlalchemy.sql.schema.Table", "line_number": 12, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.schema.Column", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 22, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 24, "usage_type": 
"call"}, {"api_name": "dataclasses.dataclass", "line_number": 19, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 27, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 28, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 29, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 30, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.schema.ForeignKeyConstraint", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 42, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 45, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "174577214", "text": "from collections.abc import Iterator, Iterable\nfrom typing import Any, List\n\n\nclass MyIterator(Iterator):\n def __init__(self, collection: List[Any]) -> None:\n self._collection = collection\n self._index = 0\n\n def __next__(self):\n try:\n item = self._collection[self._index]\n self._index += 1\n return item\n except IndexError:\n raise StopIteration\n\n\nclass ReverseIterator(Iterator):\n def __init__(self, collection: List[Any]) -> None:\n self._collection = collection\n self._index = -1\n\n def __next__(self):\n try:\n item = self._collection[self._index]\n self._index -= 1\n return item\n except IndexError:\n raise StopIteration\n\n\nclass MyList(Iterable):\n def __init__(self) -> None:\n self._items: List[Any] = []\n self._my_iterator = MyIterator(self._items)\n\n def add(self, value: Any) -> None:\n self._items.append(value)\n\n def __iter__(self) -> Iterator:\n return self._my_iterator\n\n def reverse_iterator(self) -> Iterator:\n return ReverseIterator(self._items)\n\n def __str__(self) -> str:\n return f'{self.__class__.__name__}({self._items})'\n\n\nif __name__ == \"__main__\":\n mylist = MyList()\n mylist.add('Carlos')\n mylist.add('Eduardo')\n mylist.add('Rocha')\n\n print(mylist)\n\n\n for value in mylist:\n print(value)\n\n print()\n\n for value in mylist.reverse_iterator():\n print(value)", "sub_path": "behavioral/iterator/iterator.py", "file_name": "iterator.py", "file_ext": "py", "file_size_in_byte": 1514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.abc.Iterator", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.abc.Iterator", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 38, "usage_type": "name"}, {"api_name": "collections.abc.Iterator", "line_number": 41, "usage_type": "name"}, {"api_name": "collections.abc.Iterator", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "445222313", "text": "import pygame,time, math, random, sys,os,random\r\nfrom pygame.locals import *\r\nimport RPi.GPIO as GPIO\r\nfrom gpiozero import Button\r\n\r\n\r\nbutton_jump = Button(23)\r\nbutton_shield = Button(24)\r\n\r\ndef game_initialization():\r\n \r\n global dino1,dino2,background,background1,background2,background3,background4,background5,backgroundx\r\n global e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10\r\n global sky1,sky2\r\n global gameover,shield\r\n dino1 = pygame.image.load('Dino1.png')\r\n dino2 = pygame.image.load('Dino2.png')\r\n background = pygame.image.load('bg.png')\r\n background1 = pygame.image.load('bg.png')\r\n background2 = pygame.image.load('bg2.png')\r\n background3 = pygame.image.load('bg3.png')\r\n background4 = pygame.image.load('bg4.png')\r\n background5 = pygame.image.load('bg5.png')\r\n backgroundx = pygame.image.load('bgx.png')\r\n e0= pygame.image.load('energy_0.png')\r\n e1= pygame.image.load('energy_1.png')\r\n e2= pygame.image.load('energy_2.png')\r\n e3= pygame.image.load('energy_3.png')\r\n e4= pygame.image.load('energy_4.png')\r\n e5= pygame.image.load('energy_5.png')\r\n e6= pygame.image.load('energy_6.png')\r\n e7= pygame.image.load('energy_7.png')\r\n e8= pygame.image.load('energy_8.png')\r\n e9= pygame.image.load('energy_9.png')\r\n e10= pygame.image.load('energy_10.png')\r\n sky1 = pygame.image.load('sk1.png')\r\n sky2 = pygame.image.load('sk2.png')\r\n gameover = pygame.image.load('gameover.png')\r\n shield= pygame.image.load('shield.png')\r\n\r\n global jump_music,die_music,shield_music\r\n pygame.mixer.init()\r\n pygame.time.delay(1000)\r\n jump_music = pygame.mixer.Sound('jump.wav')\r\n jump_music.set_volume(0.2)\r\n die_music = pygame.mixer.Sound('die.wav')\r\n die_music.set_volume(1.0)\r\n shield_music = pygame.mixer.Sound('shield.wav')\r\n shield_music .set_volume(1.0)\r\n\r\n pygame.mixer.music.load(\"david_bgm.mp3\") #load bgm \r\n pygame.mixer.music.play(-1)\r\n\r\n global FPS, bg_x, sk_x,W,H\r\n W, H = 600, 400\r\n HW, HH = W / 2, H / 2\r\n AREA = W * H\r\n FPS = 100\r\n bg_x = 0\r\n sk_x = 0\r\n\r\n global screen\r\n pygame.init()\r\n screen = pygame.display.set_mode((W,H))\r\n\r\n global skset,bgset,d,d1,d2,d3,d4,dset,e\r\n skset=[sky1,sky2]\r\n bgset=[background1,background2,background3,background4,background5]\r\n d = [[872,220,909],[1207,196,1248],[1655,218,1693],[2184,225,2218],[2676,189,2727],[3271,198,3317]]\r\n d1 = [[872,220,909],[1207,196,1248],[1655,218,1693],[2184,225,2218],[2676,189,2727],[3271,198,3317]]\r\n d2 = [[150,175,190],[414,221,595],[982,173,1021],[1411,185,1446],[1650,227,1781],[3222,175,3276]]\r\n d3 = [[546,141,613],[1177,153,1252],[1983,109,2009],[2741,177,3005],[3781,125,3850]]\r\n d4 = [[270,151,365],[666,178,908],[1706,226,2026],[3169,117,3301]]\r\n dset=[d1,d2,d3,d4]\r\n e=[e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10]\r\n\r\n global game_time, score, clock, image, pos_x, pos_y, david, next_bg, next_sk, ground_speed, sky_speed, flash_freq, z, op, energy,fly, flash_time, is_flash, frame, playing\r\n clock = pygame.time.Clock()\r\n image_width = 20 #if you change the image , you need to fill new image width \r\n pos_x = 50\r\n pos_y = 259-image_width\r\n david = David(pos_x, pos_y)\r\n next_bg=random.randint(0,len(dset)-1)\r\n next_sk=random.randint(0,1)\r\n ground_speed = 5\r\n 
sky_speed = 1\r\n    flash_freq = FPS/8\r\n    z = 0\r\n    op = 1 # op selects which dino image to draw\r\n    energy = 0\r\n    fly = 0\r\n    flash_time = 0\r\n    is_flash = 0\r\n    frame = 0\r\n    score = 0\r\n    game_time = 0\r\n    playing = 1 # playing = 1 -> enter or replay\r\n\r\n\r\n    \r\nclass David():\r\n\r\n    def __init__(self, x, y):\r\n        self.x = x\r\n        self.y = y\r\n        self.isJump = False\r\n        self.jumpCount = 15\r\n        \r\n\r\n    def draw(self,x,op,fly,energy,is_flash,frame,flash_freq):\r\n        myFont = pygame.font.SysFont(\"Times New Roman\", 18)\r\n        mytime = myFont.render(str(round(game_time,2))+'s',1,(0,0,0))\r\n        screen.blit(mytime,(x+20,int(self.y)+20))\r\n        if op==1:\r\n            screen.blit(dino1, (x,int(self.y)))\r\n        else:\r\n            screen.blit(dino2, (x,int(self.y)))\r\n        if fly:\r\n            screen.blit(shield, (x-20,int(self.y)-5))\r\n        else:\r\n            if is_flash:\r\n                if frame < flash_freq/2:\r\n                    screen.blit(shield, (x-20,int(self.y)-5))\r\n        \r\n        \r\n\r\n    def jump(self):\r\n        if self.isJump:\r\n            if self.jumpCount >= -15: \r\n                neg = 1\r\n                if self.jumpCount < 0:\r\n                    neg = -1\r\n                self.y -= self.jumpCount**2 * 0.1 * neg\r\n                self.jumpCount -= 0.5\r\n            else:\r\n                self.isJump = False\r\n                self.jumpCount = 15\r\n    \r\n    def check(self,x,z,d,bg_x):\r\n        global score\r\n        if ( (x>=d[z][0]) and (self.y>=d[z][1]) and (x<=d[z][2])):\r\n            die_music.play()\r\n            screen.blit(gameover,(0,0))\r\n            myFont = pygame.font.SysFont(\"Times New Roman\", 30)\r\n            mytime = myFont.render(\"Your score is : \"+str(score),1,(0,0,0))\r\n            screen.blit(mytime,(300,350))\r\n            pygame.mixer.music.stop() # end the original bgm before showing the lose screen\r\n            pygame.display.update() # update the screen with the result\r\n            pygame.mixer.music.load('lose_bgm.mp3') # load the lose bgm\r\n            pygame.mixer.music.play() # play the lose bgm (sad violin)\r\n            time.sleep(11) # wait for the whole song to play\r\n            return 0\r\n            #pygame.quit()\r\n            #sys.exit()\r\n        else:\r\n            return 1\r\n    \r\n# This is the teacher's reference code\r\ndef RCtime(RCpin): \r\n    reading = 0\r\n    GPIO.setup(RCpin,GPIO.OUT)\r\n    GPIO.output(RCpin,GPIO.LOW)\r\n    time.sleep(0.5)\r\n    GPIO.setup(RCpin,GPIO.IN)\r\n    while(GPIO.input(RCpin)==GPIO.LOW):\r\n        reading += 1\r\n    return reading\r\n\r\n\r\ndef game_loop():\r\n    \r\n    global dino1,dino2,background,background1,background2,background3,background4,background5,backgroundx\r\n    global e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10\r\n    global sky1,sky2\r\n    global gameover,shield\r\n    global jump_music,die_music,shield_music\r\n    global FPS, bg_x, sk_x,W,H\r\n    global screen\r\n    global skset,bgset,d,d1,d2,d3,d4,dset,e\r\n    global clock, image, pos_x, pos_y, david, next_bg, next_sk, ground_speed, sky_speed, flash_freq, z, op, energy,fly, flash_time, is_flash, frame, playing\r\n    global threshold,button_jump,button_shield\r\n    global score,game_time\r\n    \r\n\r\n    #button_jump.wait_for_press() # get start signal to start the game\r\n    #button_shield.wait_for_press() # same as above\r\n    \r\n    while True:\r\n        score = score + ground_speed\r\n        game_time = game_time + round(6/FPS,2)\r\n        frame=(frame+1)%flash_freq\r\n        if button_jump.is_pressed==0: # this is jump\r\n            if not david.isJump:\r\n                jump_music.play()\r\n                david.isJump = True\r\n        if button_shield.is_pressed==0: # this is shield\r\n            if fly==0:\r\n                if energy>300 and (not is_flash):\r\n                    shield_music.play()\r\n                    fly = 1\r\n                    flash_time = energy\r\n                    ground_speed = ground_speed + 10\r\n        \r\n        \r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                sys.exit()\r\n            elif event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_q:\r\n                    sys.exit()\r\n        \r\n\r\n        clock.tick(FPS)\r\n        pressed_keys = 
pygame.key.get_pressed()\r\n        screen.blit(background, (bg_x,0))\r\n        screen.blit(bgset[next_bg], (bg_x+4000,0))\r\n        screen.blit(sky1,(sk_x,0))\r\n        screen.blit(skset[next_sk],(sk_x+4000,0))\r\n\r\n        \r\n        \r\n        # NOTE: the collision/energy block below was garbled in the source\r\n        # (text between '<' and '>' was stripped); it is an assumed\r\n        # reconstruction from the surviving fragments.\r\n        if(z<len(d)):\r\n            playing = david.check(pos_x-bg_x,z,d,bg_x)\r\n            if playing==0:\r\n                if play_again():\r\n                    game_initialization()\r\n                else:\r\n                    pygame.quit()\r\n                    sys.exit()\r\n        if fly:\r\n            if energy>0:\r\n                energy=max(energy-5,0)\r\n            else:\r\n                fly=0\r\n                ground_speed = ground_speed-10\r\n                is_flash = 1\r\n        else:\r\n            energy = min(energy+5,1000) # assumed shield-energy recharge\r\n\r\n        if is_flash:\r\n            if flash_time>0:\r\n                flash_time = flash_time-5\r\n            else:\r\n                is_flash = 0\r\n        \r\n        david.draw(pos_x,op,fly,energy,is_flash,frame,flash_freq)\r\n        david.jump()\r\n        bg_x = bg_x-ground_speed\r\n        sk_x = sk_x-sky_speed\r\n        \r\n        if op==1: op=2 # change the Dino picture every loop\r\n        else: op=1\r\n        \r\n        if bg_x<=-4000:\r\n            if not fly:\r\n                ground_speed = min(ground_speed+1,10)\r\n            bg_x=bg_x+4000\r\n            z=0\r\n            if next_bg==0:\r\n                background = pygame.image.load('bg.png')\r\n            elif next_bg==1:\r\n                background = pygame.image.load('bg2.png')\r\n            elif next_bg==2:\r\n                background = pygame.image.load('bg3.png')\r\n            elif next_bg==3:\r\n                background = pygame.image.load('bg4.png')\r\n            elif next_bg==4:\r\n                background = pygame.image.load('bg5.png')\r\n            d=dset[next_bg][:]\r\n            next_bg=random.randint(0,len(dset)-1)\r\n            \r\n            \r\n        if sk_x <= -4000:\r\n            sky_speed= min(sky_speed+1,5)\r\n            sk_x = sk_x+4000\r\n            if next_sk == 0:\r\n                sky1 = pygame.image.load('sk1.png')\r\n            elif next_sk==1:\r\n                sky1 = pygame.image.load('sk2.png')\r\n            next_sk = random.randint(0,1)\r\n        screen.blit(e[int(energy/100)],(300,300))\r\n        \r\n        myFont = pygame.font.SysFont(\"Times New Roman\", 28)\r\n        mytime = myFont.render(\"Your score is : \"+str(score),1,(0,0,0))\r\n        screen.blit(mytime,(20,314))\r\n        \r\n        pygame.display.update()\r\n\r\ndef play_again():\r\n    print('press jump to play again')\r\n    while True:\r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                pygame.quit()\r\n                sys.exit()\r\n        if button_jump.is_pressed==0:\r\n            return 1\r\n        elif button_shield.is_pressed==0:\r\n            return 0\r\n    \r\n    \r\ndef game_play():\r\n    while True:\r\n        rct = RCtime(25)\r\n        print(rct)\r\n        if rct < 23000:\r\n            game_initialization()\r\n            game_loop()\r\n            pygame.quit()\r\n            sys.exit()\r\n            break\r\n    \r\n\r\nif __name__ == '__main__':\r\n    game_play()\r\n    \r\n", "sub_path": "DAVID_rpi_vesion.py", "file_name": "DAVID_rpi_vesion.py", "file_ext": "py", "file_size_in_byte": 10972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gpiozero.Button", "line_number": 7, "usage_type": "call"}, {"api_name": "gpiozero.Button", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", 
"line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 51, "usage_type": "attribute"}, 
{"api_name": "pygame.mixer.music.play", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 78, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 144, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 147, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 150, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 161, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 161, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 161, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 162, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 162, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 162, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 163, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 164, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 164, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 164, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.input", "line_number": 165, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 165, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 206, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 207, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 209, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 210, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 214, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 223, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 226, "usage_type": 
"call"}, {"api_name": "pygame.display", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 266, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 268, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 268, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 270, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 270, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 272, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 272, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 274, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 274, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 276, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 283, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 283, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 285, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 285, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 286, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 289, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 293, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 300, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 301, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 315, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 316, "usage_type": "call"}]} +{"seq_id": "451461254", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('type_one', '0002_auto_20151022_1958'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ListFailureFiltering',\n fields=[\n ('listfailure_ptr', models.OneToOneField(auto_created=True, primary_key=True, to='type_one.ListFailure', parent_link=True, serialize=False)),\n ('station_name_1', models.ForeignKey(blank=True, null=True, verbose_name='Станция', to='type_one.Station')),\n ],\n bases=('type_one.listfailure', models.Model),\n ),\n ]\n", "sub_path": "view_failures/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": 
"call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "339455496", "text": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_pandas_style_constructor():\n a = ak.Array({\"x\": [1, 2, 3], \"y\": [[1, 2, 3], [], [4, 5]]})\n assert ak.to_list(a) == [\n {\"x\": 1, \"y\": [1, 2, 3]},\n {\"x\": 2, \"y\": []},\n {\"x\": 3, \"y\": [4, 5]},\n ]\n\n\npyarrow = pytest.importorskip(\"pyarrow\")\n\n\ndef test_pyarrow_constructor():\n a = ak.Array(pyarrow.array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]))\n assert ak.to_list(a) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n", "sub_path": "tests/test_0381-fill-documentation-stubs-3.py", "file_name": "test_0381-fill-documentation-stubs-3.py", "file_ext": "py", "file_size_in_byte": 623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "awkward.Array", "line_number": 9, "usage_type": "call"}, {"api_name": "awkward.to_list", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.importorskip", "line_number": 17, "usage_type": "call"}, {"api_name": "awkward.Array", "line_number": 21, "usage_type": "call"}, {"api_name": "awkward.to_list", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "564153685", "text": "#THIS SCRIPT FETCHES USER_IDs FROM DATABASE AND DOWNLOADS ALL THE FOLLOWERS OF EACH USER_ID\n#IN CASE OF THE API THRESHOLD IS REACHED IT WILL WAIT AND CALL THE API AGAIN IN 5 MINUTES\n#QUERY TO FETCH THE USER_IDs CAN BE CHANGED AS NEEDED\nimport oauth2\nimport json\nimport psycopg2 as pc\nimport psycopg2.extras\nimport time\nimport sys\n\n\nconn = pc.connect(\"host=YourHost user=YourUser password=YourPass dbname=YourDB\")\nc1 = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\nc2 = conn.cursor()\nc1.execute(\"WITH CTE AS (SELECT DISTINCT TWEET#>'{user,screen_name}' AS user_name,TWEET#>'{user,id}' AS user_id,(TWEET#>>'{user,followers_count}')::int as followers_count\\\n ,row_number() over(partition by TWEET#>'{user,screen_name}',TWEET#>'{user,id}' order by (TWEET#>>'{user,followers_count}')::int asc)\\\n FROM PARIS_ALL A WHERE EXISTS (SELECT TWEET_ID FROM HASHTAGS_PARIS_ALL WHERE TWEET_ID = A.TWEET_ID AND hashtag = 'ParisAttacks'\\\n ) AND NOT EXISTS (SELECT USER_ID FROM FOLLOWERS WHERE USER_ID = (A.TWEET#>>'{user,id}')::BIGINT)\\\n AND NOT EXISTS (SELECT USER_ID FROM FOLLOWERS_OF_FOLLOWERS WHERE USER_ID = (A.TWEET#>>'{user,id}')::BIGINT) AND RETWEETS > 0\\\n ORDER BY (TWEET#>>'{user,followers_count}')::int ASC)\\\n SELECT * FROM CTE WHERE ROW_NUMBER=1;\")\ndef oauth_req(url, key, secret, http_method='GET', post_body='', http_headers=None):\n consumer = oauth2.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\n token = oauth2.Token(key=key, secret=secret)\n client = oauth2.Client(consumer, token)\n resp, content = client.request( url, method=http_method, body=post_body, headers=http_headers )\n return content\nCONSUMER_KEY = \"YourConsumerKey\";\nCONSUMER_SECRET = \"YourConsumerSecret\";\nACCESS_TOKEN = \"YourAccessToken\";\nACCESS_SECRET = \"YourAccessSecret\";\n\nfor row in c1:\n cursor = -1\n print(row[\"user_name\"])\n response = 
oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET)\n followers = json.loads(response)\n #print(followers)\n i = 0\n while(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Rate limit exceeded'):\n print('Sleeping for 5 min...')\n time.sleep(300)\n followers = json.loads(oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET))\n print(followers)\n if(followers.has_key('errors') == True):\n print(str(followers['errors'][0]['message']))\n if(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Sorry, that page does not exist.'):\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],0)\n c2.execute(sql)\n if (followers.has_key(\"ids\")):\n print(len(followers['ids']))\n next_cursor = followers['next_cursor']\n for id in followers['ids']:\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],id)\n c2.execute(sql)\n i=i+1\n if(i%1000==0):\n print(i)\n while(next_cursor!=0):\n response = oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(next_cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET)\n followers = json.loads(response)\n #print(followers)\n while(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Rate limit exceeded'):\n print('Sleeping for 5 min...')\n time.sleep(300)\n followers = json.loads(oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(next_cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET))\n print(followers)\n next_cursor = followers['next_cursor']\n for id in followers['ids']:\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],id)\n c2.execute(sql)\n i=i+1\n if(i%10000==0):\n print(i)\n conn.commit()\n else:\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],0)\n c2.execute(sql)\n conn.commit()\n print(\"Profile private or deleted\")\nc1.close()\nc2.close()\nconn.close()\n \n \n\n", "sub_path": "twitter_search_api_followers.py", "file_name": "twitter_search_api_followers.py", "file_ext": "py", "file_size_in_byte": 4561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "psycopg2.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "oauth2.Consumer", "line_number": 23, "usage_type": "call"}, {"api_name": "oauth2.Token", "line_number": 24, "usage_type": "call"}, {"api_name": "oauth2.Client", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "131005703", "text": "##@namespace produtil.mpi_impl.mpi_impl_base\n# Utilities like CMDFGen to simplify adding new MPI implementations to the \n# produtil.run suite of modules.\n#\n# This 
module contains classes and functions to assist developers in\n# extending the functionality of the produtil.mpi_impl package. The\n# main highlight is the CMDFGen, which generates command files. Some\n# MPI implementations, and the mpiserial program, want to read a file\n# with one line per MPI rank telling what program to run on each rank.\n# For example, LSF+IBMPE and LoadLeveler+IBMPE work this way if one\n# wants to run different programs on different ranks.\n\nimport tempfile,stat,os, logging\n\nmodule_logger=logging.getLogger('produtil.mpi_impl')\n\nclass MPIConfigError(Exception): \n \"\"\"!Base class of MPI configuration exceptions.\"\"\"\nclass WrongMPI(MPIConfigError): \n \"\"\"!Unused: raised when the wrong MPI implementation is accessed. \"\"\"\nclass MPISerialMissing(MPIConfigError):\n \"\"\"!Raised when the mpiserial program is required, but is missing.\"\"\"\nclass MPIAllRanksError(MPIConfigError):\n \"\"\"!Raised when the allranks=True keyword is sent to mpirun or mpirunner,\nbut the MPI program specification has more than one rank.\"\"\"\nclass MPIMixed(MPIConfigError):\n \"\"\"!Thrown to indicate serial and parallel processes are being mixed in a single mpi_comm_world.\"\"\"\nclass MPIDisabled(MPIConfigError):\n \"\"\"!Thrown to MPI is not supported.\"\"\"\nclass OpenMPDisabled(MPIConfigError):\n \"\"\"!Raised when OpenMP is not supported by the present implementation.\"\"\"\nclass CMDFGen(object):\n \"\"\"!Generates files with one line per MPI rank, telling what\n program to run on each rank.\n\n This class is used to generate command files for mpiserial, poe or\n mpirun.lsf. Command files are files with one MPI rank per line\n containing a shell command to run for that rank. Generally the\n input (lines) is generated by the to_arglist function in a\n subclass of produtil.mpiprog.MPIRanksBase. See the\n produtil.mpi_impl.mpirun_lsf for an example of how to use this.\"\"\"\n def __init__(self,base,lines,cmd_envar='SCR_CMDFILE',\n model_envar=None,filename_arg=False,**kwargs):\n \"\"\"!CMDFGen constructor\n \n @param base type of command file being generated. See below.\n @param lines the command file contents as a list of strings, one per line\n @param cmd_envar environment variable to set to the command file path\n @param model_envar environment variable to set to \"MPMD\" \n @param kwargs Sets the command file name. See below.\n @param filename_arg If True, the name of the command file is appended to the program argument list.\n\n The command file is generated from\n tempfile.NamedTemporaryFile, passing several arguments from\n kwargs, if provided, or suitable defaults otherwise. There\n are several arguments used. 
In all cases, replace \"base\" with\n the contents of the @c base argument:\n\n * base_suffix --- temporary file suffix (default: \"base.\")\n * base_prefix --- temporary file prefix (default: \".cmdf\")\n * base_tempdir --- directory in which to create the file\n\n @bug The base_suffix keyword is used for both the suffix and prefix\"\"\"\n assert(base is not None)\n assert(isinstance(lines,list))\n assert(len(lines)>0)\n assert(isinstance(lines[0],basestring))\n assert(len(lines[0])>0)\n self.filename=kwargs.get(str(base),None)\n self.tmpprefix=kwargs.get('%s_suffix'%(base,),'%s.'%(base,))\n self.tmpsuffix=kwargs.get('%s_suffix'%(base,),'.cmdf')\n self.tmpdir=kwargs.get('%s_tmpdir'%(base,),'.')\n self.cmd_envar=cmd_envar\n self.model_envar=model_envar\n self.filename_arg=filename_arg\n out='\\n'.join(lines)\n if len(out)>0:\n out+='\\n'\n self.cmdf_contents=out\n return\n ##@var filename\n # command file's filename\n\n ##@var tmpprefix \n # temporary file prefix\n \n ##@var tmpsuffix\n # temporary file suffix\n\n ##@var tmpdir\n # temporary file directory\n\n ##@var cmd_envar\n # Environment variable to set telling the path to the\n # command file\n\n ##@var model_envar\n # Environment variable to set to \"MPMD\"\n\n ##@var cmdf_contents\n # String containing the command file contents.\n\n def _add_more_vars(self,envars,logger):\n \"\"\"!Adds additional environment variables to the envars dict,\n needed to configure the MPI implementation correctly. This is\n used to set MP_PGMMODEL=\"MPMD\" if the constructor receives\n model_envar=\"MP_PGMMODEL\".\n\n @param envars[out] the dict to modify\n @param logger a logging.Logger for log messages\"\"\"\n if self.model_envar is not None:\n if logger is not None:\n logger.info('Set %s=\"MPMD\"'%(self.model_envar,))\n envars[self.model_envar]='MPMD'\n def __call__(self,runner,logger=None):\n \"\"\"!Adds the environment variables to @c runner and creates the command file.\n\n @param[out] runner A produtil.prog.Runner to modify\n @param logger a logging.Logger for log messages\"\"\"\n if logger is None: logger=module_logger\n if self.filename is not None:\n with open(self.filename,'wt') as f:\n f.write(self.cmdf_contents)\n if logger is not None:\n logger.info('Write command file to %s'%(repr(filename),))\n kw={self.cmd_envar: self.filename}\n self._add_more_vars(kw,logger)\n if logger is not None:\n for k,v in kw.iteritems():\n logger.info('Set %s=%s'%(k,repr(v)))\n if self.filename_arg:\n runner=runner[self.filename]\n return runner.env(**kw)\n else:\n with tempfile.NamedTemporaryFile(mode='wt',suffix=self.tmpsuffix,\n prefix=self.tmpprefix,dir=self.tmpdir,delete=False) as t:\n if logger is not None:\n logger.info('Write command file to %s'%(repr(t.name),))\n t.write(self.cmdf_contents)\n # Make the file read-only and readable for everyone:\n os.fchmod(t.fileno(),stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)\n kw={self.cmd_envar: t.name}\n self._add_more_vars(kw,logger)\n if logger is not None:\n for k,v in kw.iteritems():\n logger.info('Set %s=%s'%(k,repr(v)))\n runner.env(**kw)\n if self.filename_arg:\n runner=runner[t.name]\n return runner\n", "sub_path": "NEMS/tests/produtil/ush/produtil/mpi_impl/mpi_impl_base.py", "file_name": "mpi_impl_base.py", "file_ext": "py", "file_size_in_byte": 6664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 135, "usage_type": 
"call"}, {"api_name": "os.fchmod", "line_number": 141, "usage_type": "call"}, {"api_name": "stat.S_IRUSR", "line_number": 141, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 141, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "462173021", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 8 13:19:54 2021\r\n\r\n@author: 91937\r\n\"\"\"\r\n\r\n\r\n# implementation of queue\r\n# based on first in first out\r\n# enqueue opration means insertion \r\n# dequeue opration means deletion\r\n\r\n\r\n# implementation using list\r\nclass queue:\r\n def __init__(self):\r\n self.q = []\r\n \r\n def enqueue(self,item):\r\n self.q.insert(0,item)\r\n \r\n def dequeue(self):\r\n self.q.pop() \r\n# we can define seek and length function also\r\n# we are inserting 1 then 2 then 3\r\n# so in queue -----> 3--2--1 \r\na = queue()\r\na.enqueue(1)\r\na.enqueue(2)\r\na.enqueue(3)\r\nprint(a.q)\r\n# perform dequeue operation on the queue so output will be ----> 3--2 \r\na.dequeue()\r\nprint(a.q)\r\n\r\n\r\n\r\n# implementing using collection module\r\nfrom collections import deque\r\nq1 = deque()\r\nq1.appendleft('akash') \r\nq1.appendleft('ashok') \r\nq1.appendleft('kamerkar')\r\nprint(q1) \r\nq1.pop()\r\nprint(q1)\r\n", "sub_path": "python_basic_queue_using_deque_list.py", "file_name": "python_basic_queue_using_deque_list.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "177119114", "text": "from flask import render_template\nimport connexion\n\n# Создадим экземпляр приложения\napp = connexion.App(__name__, specification_dir='./')\n# Прочитаем файл swagger.yml для настройки конечных точек\napp.add_api('swagger.yml')\n\n\n# Создадим маршрут URL в нашем приложении для \"/\"\n@app.route('/')\ndef home():\n \"\"\"\n Эта функция просто отвечает на URL \"localhost:5000/\" в браузера\n\n :return: подствляет шаблон 'home.html'\n \"\"\"\n return render_template('home.html')\n\n\n# Если мы работаем в автономном режиме, запускаем приложение\nif __name__ == '__main__':\n app.run(host='localhost', port=1356, debug=True)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "connexion.App", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "374888412", "text": "import matplotlib.pyplot as plt\n\n\na=[0,1,2,3,4,5,6,7]\nb=[10,12,13,14,15,12,16,12]\nc=[20,28,26,24,26,27,21,28]\nd=[41,47,49,45,43,49,45,46]\nplt.title(\"My Graph\")\nplt.plot(a,label=\"first\")\nplt.plot(a,c,label=\"second\")\nplt.plot(a,d,label=\"third\",lw=5)\nplt.legend(loc=\"best\",shadow=True)\n\nplt.grid()\nplt.show()\n\n", "sub_path": "requirements/venky_task/AI/matplotlib/2.py", "file_name": "2.py", "file_ext": "py", "file_size_in_byte": 307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.title", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "275255090", "text": "#!/bin/python\n\nimport requests\nimport pprint\n\nhost = \"10.0.1.103\"\n#host = \"10.0.1.124\"\n\nurl = 'https://cifarelli.net/alexa/temps'\n#url = 'http://%s:8080/alexaskills/' % host\n#url = 'http://%s:8080/alexaskills/rest/temps' % host\n#url = 'http://%s:8080/alexaskills/rest/temps/7fc6eb1a-2af1-40ca-8706-d9c546c93ea9' % host\n#url = 'http://%s:8080/alexaskills/rest/temps/Attic Temperature' % host\ndata = \"\"\"{\"type\":\"SmartThings\",\n \"title\":\"Secure\", \n \"description\":\n \"\"}\"\"\"\nheaders = {\"Content-Type\": \"application/json\"}\nresp = requests.get(url, headers=headers)\n#resp = requests.post(url, headers=headers, data=data)\nprint(\"Status returned: \", resp.status_code);\nif (resp.headers['content-type'] == 'application/json'):\n pp = pprint.PrettyPrinter(indent=3);\n pp.pprint(resp.json());\nelse:\n print(resp.text);\n\n", "sub_path": "python/alexa_test.py", "file_name": "alexa_test.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "pprint.PrettyPrinter", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "280181508", "text": "import pymongo\nfrom word2vec import LineSentence\n\n# in_file_name = '../wiki_ja/wakati/jawiki-100.txt'\nin_file_name = '../wiki_ja/wakati/jawiki-20160407-pages-articles-multistream.xml-001.txt'\n# in_file = open(in_file_name, 'r')\nline_sentence = LineSentence(in_file_name)\ncollection = pymongo.MongoClient()['wikipedia']['sentence']\ncollection.create_index([('sent_id', pymongo.ASCENDING)],\n unique=True)\n\nfor sent_num, line in enumerate(line_sentence):\n sent_id = 'sent_{}'.format(sent_num)\n collection.insert({'sent_id': sent_id,\n 'content': line})\n\n", "sub_path": "lang_analysis/sentence2vec/save_sentence.py", "file_name": "save_sentence.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "word2vec.LineSentence", "line_number": 7, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "pymongo.ASCENDING", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "202362376", "text": "from django.urls import path\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('iconic/', views.iconic, name=\"iconic\"),\n path('events/', views.events, name=\"events\"),\n path('experience/', views.experience, name=\"experience\"),\n path('festival/', views.festival, name=\"festival\"),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)", "sub_path": "apps/nearby/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "49976515", "text": "from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport unittest\nfrom mobile.home import HomeMobile\nfrom mobile.payment import PaymentMobile\n\nclass PD221Mobile(unittest.TestCase):\n \n def setUp(self):\n self.driver = webdriver.Chrome(\"E:\\ourdeal\\selenium\\chromedriver\")\n \n def tearDown(self):\n pass\n \n def xtest_credit_card_transaction_with_existing_member(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/parcfitness-3-month-gym-membership-bellevue-hill-rose-bay-and-gymtime-balgowlah-locations-feb\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(2)\n paymentMobile.enterDealerLocation()\n paymentMobile.applyCredit()\n #paymentMobile.enterPhone()\n #paymentMobile.agreeTC()\n #payflowMobile = paymentMobile.clickCheckOutWithCreditCard()\n #payflowMobile.enterCreditCardDetail()\n \n \n def xtest_credit(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/parcfitness-3-month-gym-membership-bellevue-hill-rose-bay-and-gymtime-balgowlah-locations-feb\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(2)\n paymentMobile.enterDealerLocation()\n paymentMobile.applyCredit()\n \n def xtest_discount_code(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/parcfitness-3-month-gym-membership-bellevue-hill-rose-bay-and-gymtime-balgowlah-locations-feb\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(3)\n paymentMobile.enterDealerLocation()\n paymentMobile.applyDiscountCode()\n \n \n def xtest_discount_code_plus_credit(self):\n driver 
= self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/nifty-spot-in-car-iphone--to-stereo-transmitter\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(5)\n paymentMobile.enterShippingDetails()\n paymentMobile.enterDealerLocation()\n paymentMobile.enterCustomData()\n paymentMobile.tickOptInText(0)\n #paymentMobile.applyDiscountCode()\n #paymentMobile.applyCredit()\n paymentMobile.enterPhone()\n payFlowPage = paymentMobile.clickCheckOutWithCreditCard()\n payFlowPage.enterCreditCardDetail()\n \n \n \n ", "sub_path": "PD-221/PD221Mobile.py", "file_name": "PD221Mobile.py", "file_ext": "py", "file_size_in_byte": 2744, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "mobile.home.HomeMobile", "line_number": 20, "usage_type": "call"}, {"api_name": "mobile.home.HomeMobile", "line_number": 35, "usage_type": "call"}, {"api_name": "mobile.home.HomeMobile", "line_number": 45, "usage_type": "call"}, {"api_name": "mobile.home.HomeMobile", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "412047977", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('team_builder', '0004_auto_20160208_2217'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='team',\n options={'verbose_name': 'Team', 'verbose_name_plural': 'Teams'},\n ),\n migrations.AddField(\n model_name='club',\n name='slug',\n field=models.SlugField(max_length=100, null=True, blank=True),\n ),\n ]\n", "sub_path": "FancyArena/apps/team_builder/migrations/0005_auto_20160222_1137.py", "file_name": "0005_auto_20160222_1137.py", "file_ext": "py", "file_size_in_byte": 577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "400385145", "text": "from keras.layers import Bidirectional, LSTM, Input, RepeatVector, Dense\nfrom keras.layers import GlobalAveragePooling1D, concatenate, GlobalMaxPooling1D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.models import Model\nfrom keras.layers.core import Activation\n\nfrom model_architectures.attention import AttentionLayer, AttentionWithContext\n\nclass SentencePair:\n def __init__(self, hidden_size=512, no_classes=1, use_attention=False):\n self.hidden_size = hidden_size\n self.no_classes = no_classes\n\n self.use_attention = use_attention\n\n def build(self, input_shape=[(400, 256), (256,)]):\n story_input = Input(shape=input_shape[0], 
name='story_input')\n x1 = Bidirectional(LSTM(self.hidden_size, return_sequences=True, kernel_initializer='glorot_uniform'), name='Bidirectional-1')(story_input)\n x1 = Bidirectional(LSTM(self.hidden_size, return_sequences=True, kernel_initializer='glorot_uniform'), name='Bidirectional-2')(x1)\n\n if self.use_attention:\n #x1 = AttentionLayer(x1)\n x1 = AttentionWithContext()(x1)\n else:\n x2 = GlobalMaxPooling1D()(x1)\n x1 = GlobalAveragePooling1D()(x1)\n x1 = concatenate([x1, x2])\n\n section_input = Input(shape=input_shape[1], name='section_input')\n x2 = Dense(self.hidden_size*2, kernel_initializer='glorot_uniform', name='Dense-1')(section_input)\n x2 = LeakyReLU(0.2)(x2)\n\n x = concatenate([x1, x2])\n\n x = Dense(self.hidden_size*2, kernel_initializer='glorot_uniform', name='Dense-2')(x)\n x = LeakyReLU(0.2)(x)\n x = Dense(self.hidden_size*2, kernel_initializer='glorot_uniform', name='Dense-3')(x)\n x = LeakyReLU(0.2)(x)\n\n x = Dense(self.no_classes, kernel_initializer='glorot_uniform', name='output')(x)\n x = Activation('sigmoid', name='sigmoid')(x)\n\n return Model(inputs=[story_input, section_input], outputs=x)\n\nif __name__ == '__main__':\n model = SentencePair(use_attention=False)\n model = model.build()\n model.summary()\n", "sub_path": "model_architectures/sentence_pair.py", "file_name": "sentence_pair.py", "file_ext": "py", "file_size_in_byte": 2078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.layers.Input", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 19, "usage_type": "call"}, {"api_name": "model_architectures.attention.AttentionWithContext", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "643529171", "text": "from PIL import Image, ImageFile\nimport fileModule\nimport io, time\n\ndef main(args):\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n fm = fileModule.FileManager()\n begin = time.time()\n data = fm.loadFile(args[\"id\"])\n elapsed = time.time() - begin\n print (\"read Time \" + repr(elapsed))\n image = 
Image.open(io.BytesIO(data.read()))\n\n image.thumbnail((200, 200), Image.ANTIALIAS)\n newImage = io.BytesIO()\n image.save(newImage, args[\"formatOut\"])\n newImage.seek(0)\n begin = time.time()\n retId = fm.saveFile(newImage, \"image.\" + args[\"formatOut\"])\n elapsed = time.time() - begin\n print (\"write Time \" + repr(elapsed))\n return {\"retId\": retId}\n", "sub_path": "sample_actions/images/resize.py", "file_name": "resize.py", "file_ext": "py", "file_size_in_byte": 678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 6, "usage_type": "name"}, {"api_name": "fileModule.FileManager", "line_number": 7, "usage_type": "call"}, {"api_name": "time.time", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "429923724", "text": "from flask import request, render_template, redirect, url_for, flash\nfrom flask_login import login_required\nfrom app.main import main\nfrom app.main.forms import CreateKeyForm\nfrom app import api_key_api_client\nfrom app.utils import user_has_permissions\n\n\n@main.route(\"/services//api-keys\")\n@login_required\n@user_has_permissions('manage_api_keys')\ndef api_keys(service_id):\n return render_template(\n 'views/api-keys.html',\n keys=api_key_api_client.get_api_keys(service_id=service_id)['apiKeys']\n )\n\n\n@main.route(\"/services//api-keys/create\", methods=['GET', 'POST'])\n@login_required\n@user_has_permissions('manage_api_keys')\ndef create_api_key(service_id):\n key_names = [\n key['name'] for key in api_key_api_client.get_api_keys(service_id=service_id)['apiKeys']\n ]\n form = CreateKeyForm(key_names)\n if form.validate_on_submit():\n secret = api_key_api_client.create_api_key(\n service_id=service_id,\n key_name=form.key_name.data,\n key_type=form.key_type.data\n )\n return render_template('views/api-keys/show.html', secret=secret,\n key_name=form.key_name.data)\n return render_template(\n 'views/api-keys/create.html',\n form=form\n )\n\n\n@main.route(\"/services//api-keys/revoke/\", methods=['GET', 'POST'])\n@login_required\n@user_has_permissions('manage_api_keys')\ndef revoke_api_key(service_id, key_id):\n key_name = api_key_api_client.get_api_keys(service_id=service_id, key_id=key_id)['apiKeys'][0]['name']\n if request.method == 'GET':\n return render_template(\n 'views/api-keys/revoke.html',\n key_name=key_name\n )\n elif request.method == 'POST':\n api_key_api_client.revoke_api_key(service_id=service_id, key_id=key_id)\n flash('‘{}’ was revoked'.format(key_name), 'default_with_tick')\n return redirect(url_for('.api_keys', service_id=service_id))\n", "sub_path": "app/main/views/api_keys.py", "file_name": "api_keys.py", "file_ext": "py", "file_size_in_byte": 1992, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "app.api_key_api_client.get_api_keys", "line_number": 15, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 15, "usage_type": "name"}, {"api_name": "app.main.main.route", "line_number": 9, "usage_type": "call"}, {"api_name": "app.main.main", "line_number": 9, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 10, "usage_type": "name"}, {"api_name": "app.utils.user_has_permissions", "line_number": 11, "usage_type": "call"}, {"api_name": "app.api_key_api_client.get_api_keys", "line_number": 24, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 24, "usage_type": "name"}, {"api_name": "app.main.forms.CreateKeyForm", "line_number": 26, "usage_type": "call"}, {"api_name": "app.api_key_api_client.create_api_key", "line_number": 28, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "app.main.main.route", "line_number": 19, "usage_type": "call"}, {"api_name": "app.main.main", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 20, "usage_type": "name"}, {"api_name": "app.utils.user_has_permissions", "line_number": 21, "usage_type": "call"}, {"api_name": "app.api_key_api_client.get_api_keys", "line_number": 45, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "app.api_key_api_client.revoke_api_key", "line_number": 52, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 54, "usage_type": "call"}, {"api_name": "app.main.main.route", "line_number": 41, "usage_type": "call"}, {"api_name": "app.main.main", "line_number": 41, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 42, "usage_type": "name"}, {"api_name": "app.utils.user_has_permissions", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "587965867", "text": "from multiprocessing import Pool\nimport asyncio\nimport time\n\n\nasync def test(time):\n print(\"start\")\n await asyncio.sleep(time) # 调用协程内的时间间隔\n print(\"end\")\n\n\nasync def main(num):\n start_time = time.time()\n tasks = [asyncio.create_task(test(3)) for proxy in range(num)] # 注册任务列表\n print(len(tasks))\n print(\"协程结束时间:\", time.time() - start_time)\n\n\ndef run(num):\n asyncio.run(main(num)) # 使用协程调用方法\n\n\nif __name__ == \"__main__\":\n \"\"\"\n start_time = time.time()\n p = Pool()\n # 启动多个进程,在每个进程内运行协程任务\n for i in range(4):\n # apply(): 阻塞主进程, 并且一个一个按顺序地执行子进程, 等到全部子进程都执行完毕后 ,继续执行 apply()后面主进程的代码\n # apply_async() 非阻塞异步的, 他不会等待子进程执行完毕, 主进程会继续执行, 他会根据系统调度来进行进程切换\n 
p.apply_async(run, args=(10,))\r\n    p.close()\r\n    p.join()\r\n    print(\"process elapsed time:\", time.time() - start_time)\r\n\r\n    \"\"\"\r\n    run(10)\r\n", "sub_path": "week07/note/进程与协程.py", "file_name": "进程与协程.py", "file_ext": "py", "file_size_in_byte": 1122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "asyncio.sleep", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 13, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "392668981", "text": "from queue_req_resp import RabbitMQ\nimport queue\nimport logging\nfrom logging.handlers import QueueHandler, QueueListener\nimport datetime\n\nclass LoggerClient():\n    def __init__(self,logfile_path,console=False):\n        \"\"\"\n        Logger API at the client side to store logs locally and send them to the central logger MQ\n        Parameters - logfile_path - path where the log file is created\n                   - console - whether to display log messages on screen - default False\n        (a RabbitMQ object is created internally to send the logs)\n        \"\"\"\n        self.RMQ = RabbitMQ()\n        #Creating queue and logger\n        self.log_queue = queue.Queue(-1) #infinite size\n        self.queue_handler = QueueHandler(self.log_queue)\n        self.logger = logging.getLogger()\n        self.logger.addHandler(self.queue_handler)\n        #formatter\n        self.formatter = logging.Formatter(' %(message)s')\n        #file handler - write to file\n        self.file_handler_loc = logging.FileHandler(logfile_path)\n        self.file_handler_loc.setFormatter(self.formatter)\n        #console handler - print on screen\n        if(console == True):\n            self.console_handler = logging.StreamHandler()\n            self.console_handler.setFormatter(self.formatter)\n            self.listener = QueueListener(self.log_queue,self.console_handler,self.file_handler_loc )\n        else:\n            self.listener = QueueListener(self.log_queue,self.file_handler_loc )\n\n\n    def start_logger(self):\n        self.listener.start()\n\n    def emit(self, record):\n        return self.queue_handler.emit(record)\n\n    def __del__(self):\n        self.listener.stop()\n\n    def log(self,msg):\n        time=datetime.datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n        msg=\"[\"+time+\"] : \"+msg\n        self.logger.error(msg)\n        msg+=\"\\n\"\n        self.RMQ.send(\"\", \"To_Log\", msg)\n\n###README\n#Create a LoggerClient object by passing the required parameters\n#(it creates its own RabbitMQ connection internally)\n#call start_logger() using this object\n#Now you can use this object to log - call Object.log(msg)\n#The log message is saved with a date in the local log file (logfile_path in the parameters) and sent to the central logger through the queue (and also to the console, based on the parameter passed)\n#Example: (test_logclient.py)\n#----------------------------\n# from logger_client import LoggerClient\n# from queue_req_resp import RabbitMQ\n# import time\n\n# def test():\n#     LC = LoggerClient(\"./test_log.log\",console=True)\n#     LC.start_logger()\n#     for i in range(4):\n#         time.sleep(1)\n#         LC.log('This is a warning message')\n#         LC.log('This is an error message') \n#     return \n\n# test()", "sub_path": "Logger/logger_client.py", "file_name": "logger_client.py", "file_ext": "py", "file_size_in_byte": 2643, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "queue_req_resp.RabbitMQ", "line_number": 15, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 17, "usage_type": "call"}, {"api_name": 
"logging.handlers.QueueHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.handlers.QueueListener", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.handlers.QueueListener", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "265443078", "text": "from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.template.loader import render_to_string\n#from django.core.cache import cache\nfrom django.template.defaultfilters import escape\n\n\nfrom modules.templatetags.module_filters import truncate_chars_by_words\nfrom django.utils.html import strip_tags\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef list_pages(context):\n website = context['request'].website\n pages = website.get_pages()\n return mark_safe(render_to_string(\"pages/list_pages.html\", {\"pages\": pages}))\n\n\ndef get_meta_content(meta_content, modules, context):\n \"\"\"To-do: write docs string here\"\"\"\n if not meta_content:\n meta_content = ''\n article_meta_variables = [':articleTitle', ':articleDescription', ':articleThumb']\n topic_meta_variables = [':topicThumb', ':topicName', ':TopicDescription']\n image_meta_variables = [':imageURL', ':imageDescription', ':imageIndex']\n all_meta_variables = [':currentURL'] + article_meta_variables + topic_meta_variables + image_meta_variables\n\n for meta_variable in all_meta_variables:\n if meta_content.find(meta_variable) > -1:\n for module in modules:\n # Article related meta\n if meta_variable in article_meta_variables and module.module_type == \"article\":\n article = module.render_article(context, render=False)\n if article:\n if meta_variable == ':articleTitle':\n meta_content = meta_content.replace(':articleTitle', article.title)\n if meta_variable == ':articleDescription':\n meta_content = meta_content.replace(':articleDescription',\n escape(truncate_chars_by_words(strip_tags(article.description), 500)))\n if meta_variable == ':articleThumb':\n meta_content = meta_content.replace(':articleThumb', article.thumbnail)\n break\n # Topic related meta\n if meta_variable in topic_meta_variables and module.module_type == \"topic-name\":\n topic = module.render_topic_name(context, render=False)\n if topic:\n if meta_variable == ':topicName':\n meta_content = meta_content.replace(':topicName', topic.name)\n if meta_variable == ':TopicDescription':\n meta_content = meta_content.replace(':TopicDescription',\n escape(truncate_chars_by_words(strip_tags(topic.description), 500)))\n if meta_variable == ':topicThumb':\n meta_content = meta_content.replace(':topicThumb', topic.image_url)\n break\n\n # Image related meta\n if meta_variable in image_meta_variables and module.module_type == \"image-gallery\":\n image_gallery_data = module.render_image_gallery(context, render=False)\n try:\n image = image_gallery_data['image']\n if meta_variable == ':imageURL':\n meta_content = meta_content.replace(':imageURL', image.link)\n if meta_variable == ':imageDescription':\n meta_content = meta_content.replace(':imageDescription',\n 
escape(truncate_chars_by_words(strip_tags(image.description), 500)))\n if meta_variable == ':imageIndex':\n meta_content = meta_content.replace(':imageIndex', str(image.order))\n except:\n pass\n break\n\n # set current url\n if meta_variable == ':currentURL':\n meta_content = meta_content.replace(':currentURL', context['request'].build_absolute_uri())\n return meta_content\n\n\n@register.simple_tag(takes_context=True)\ndef page_seo(context):\n page = context['page']\n modules = context['modules']\n cache_key = page.get_cache_key(context, 'seo')\n page_meta_html = page.get_cache(context, cache_key=cache_key)\n if not page_meta_html:\n page_metas = \"\"\n for meta in page.metas.all():\n meta_name = meta.name.strip()\n meta_content = meta.content.strip()\n if meta_content:\n meta_content = get_meta_content(meta_content, modules, context)\n page_metas += '\\n'\n page_meta_html = page_metas\n page.set_cache(context, page_meta_html, cache_key=cache_key)\n\n return mark_safe(page_meta_html)\n\n\n@register.simple_tag(takes_context=True)\ndef page_title(context):\n \"\"\"\n get page title meta\n it set a context variable context['page_title_html']\n if its get a title then it will set the context variable\n if not then nothings\n\n Example :\n {% page_title %}\n {% if page_title_html %}\n {{ page_title_html }}\n {% else %}\n EntertaiNow.com News Network\n {% endif %}\n\n \"\"\"\n page = context['page']\n modules = context['modules']\n page_title = \"\"\n if page.page_title:\n page_title = page.page_title\n cache_key = page.get_cache_key(context, 'title')\n html_title = page.get_cache(context, cache_key=cache_key)\n if not html_title:\n page_title = get_meta_content(page_title, modules, context)\n page.set_cache(context, page_title, cache_key=cache_key)\n context['page_title_html'] = page_title\n else:\n context['page_title_html'] = html_title\n else:\n context['page_title_html'] = page_title\n return \"\"", "sub_path": "pages/templatetags/page_tags.py", "file_name": "page_tags.py", "file_ext": "py", "file_size_in_byte": 5860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.template.Library", "line_number": 11, "usage_type": "call"}, {"api_name": "django.template", "line_number": 11, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 18, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 18, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters", "line_number": 32, "usage_type": "name"}, {"api_name": "django.template.defaultfilters.escape", "line_number": 41, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters.truncate_chars_by_words", "line_number": 41, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 41, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.escape", "line_number": 53, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters.truncate_chars_by_words", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 53, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.escape", "line_number": 67, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters.truncate_chars_by_words", "line_number": 67, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 67, "usage_type": "call"}, {"api_name": 
"modules.templatetags.module_filters", "line_number": 83, "usage_type": "name"}, {"api_name": "modules.templatetags.module_filters", "line_number": 92, "usage_type": "argument"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 97, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters", "line_number": 118, "usage_type": "name"}, {"api_name": "modules.templatetags.module_filters", "line_number": 125, "usage_type": "argument"}]} +{"seq_id": "272972854", "text": "__author__ = \"Jeremy Nelson\"\n\nfrom tensorflow.keras.layers import Flatten, Dense # type: ignore\nfrom tensorflow.keras.models import Sequential # type: ignore\n\nfrom config import AIKI_NAMES, CLASS_NAMES, IMG_HEIGHT, IMG_WIDTH\n\n\ndef feedforward_model(class_names: list = CLASS_NAMES) -> Sequential:\n model = Sequential([\n # input layer\n Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, 1)),\n # first hidden layer\n Dense(64, activation='relu'),\n # second hidden layer\n Dense(64, activation='relu'),\n # output layer\n Dense(len(class_names), activation='softmax')\n ])\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n # Model Summary\n model.summary()\n return model\n\n\ndef aiki_feedforward() -> Sequential:\n return feedforward_modal(AIKI_NAMES)\n\n\ndef digits_feedforward() -> Sequential:\n digits = [i for i in range(0, 10)]\n return feedforward_modal(digits)\n", "sub_path": "models/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "config.CLASS_NAMES", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 12, "usage_type": "call"}, {"api_name": "config.IMG_HEIGHT", "line_number": 12, "usage_type": "name"}, {"api_name": "config.IMG_WIDTH", "line_number": 12, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 9, "usage_type": "name"}, {"api_name": "config.AIKI_NAMES", "line_number": 30, "usage_type": "argument"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "654426845", "text": "from collections import Counter\n\nclass Solution:\n def majorityElement(self, nums: [int]) -> [int]:\n count = Counter()\n for num in nums:\n count[num] += 1\n if len(count) == 3:\n new_count = Counter()\n for elem, freq in count.items(): \n if freq != 1: new_count[elem] = freq - 1\n count = new_count\n \n cands = Counter(num for num in nums if num in count) \n return [num for num in cands if cands[num] > len(nums)/3]", "sub_path": "majorityElement.py", "file_name": "majorityElement.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.Counter", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.Counter", 
"line_number": 14, "usage_type": "call"}]} +{"seq_id": "436748428", "text": "import cv2\nimport matplotlib.pyplot as plt\n\ndef readimage(arg):\n img = cv2.imread(arg)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n #BGR to RGB\n#Bilateral filter is a Non linear filter prevents averaging across image edges, while averaging within smooth regions of the image,\n# and so is edge-preserving, while Gaussian Filter is not.\n\ndef contours(image):\n img_blur = cv2.bilateralFilter(image, d = 7,\n sigmaSpace = 75, sigmaColor =75)\n # Convert to grayscale\n #cv2.imshow('img', image)\n img_gray = cv2.cvtColor(img_blur, cv2.COLOR_RGB2GRAY)\n # Apply the thresholding\n a = img_gray.max()\n _, thresh = cv2.threshold(img_gray, a/2+60, a,cv2.THRESH_BINARY_INV)\n\n contours, hierarchy = cv2.findContours(\n image = thresh,\n mode = cv2.RETR_TREE,\n method = cv2.CHAIN_APPROX_SIMPLE)\n\n contours = sorted(contours, key = cv2.contourArea, reverse = True)\n\n c_0 = contours[1]\n # Obtaining the 4 points of the bounding rectangle\n # x, y, w, h = cv2.boundingRect(c_0)\n # img_copy2 = image.copy()\n # img_box = cv2.rectangle(img_copy2, (x, y), (x+w, y+h), color = (0, 255, 0), thickness = 6)\n return c_0\n\nif __name__ == '__main__':\n from sys import argv\n if len(argv) < 2:\n print (\"Usage: python %s \" % argv[0])\n exit()\n img = readimage(argv[1])\n x,y,w,h,contour_img = contours(img)\n#Region of Interest - only the business card area is selected\n roi = img[y:y+h, x:x+w]\n # cv2.imwrite(\"roi.png\", roi)\n\n plt.subplot(1, 2, 1)\n plt.imshow(img)\n plt.axis('off')\n plt.title('Original')\n\n plt.subplot(1, 2, 2)\n plt.imshow(contour_img)\n plt.axis('off')\n plt.title('Contour Image')\n\n plt.show()\n", "sub_path": "CardScanner/Contours.py", "file_name": "Contours.py", "file_ext": "py", "file_size_in_byte": 1818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.bilateralFilter", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 38, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.title", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "542461692", "text": "import sys\nimport os\nimport tensorflow as tf\nimport model\nimport joblib\nimport numpy as np\nimport jieba\nfrom tensorflow.python.framework import graph_util\nimport configparser\nfrom data_utils import DataUtils\nimport multiprocessing\nfrom multiprocessing import Process, Manager\nfrom sklearn.model_selection import StratifiedKFold\nfrom data_model import DataModel\nimport re\n\nparams_cfg_file = './params.cfg'\nconfig = configparser.RawConfigParser()\nconfig.read(params_cfg_file)\nbatch_size_list = [int(v) for v in config.get('hyperparameters', 'batch_size_list').split(\",\")]\nlearning_rate_list = [float(v) for v in config.get('hyperparameters', 'learning_rate_list').split(\",\")]\ndropout_list = [float(v) for v in config.get('hyperparameters', 'dropout_list').split(\",\")]\nlayer_num_list = [int(v) for v in config.get('hyperparameters', 'layer_num_list').split(\",\")]\nhidden_num_list = [int(v) for v in config.get('hyperparameters', 'hidden_num_list').split(\",\")]\n\nsave_file_num = config.getint('hyperparameters', 'save_file_num')\nword_vec_size = config.getint('hyperparameters', 'word_vec_size')\nsentence_len = config.getint('hyperparameters', 'sentence_len')\niter_num = config.getint('hyperparameters', 'iter_num')\n\nfold_num = config.getint(\"plugin\", 'fold_num')\nbase_acc = config.getfloat('plugin', 'base_acc')\nbase_f1_score = config.getfloat('plugin', 'base_f1_score')\nsave_pb_mode = config.getboolean('plugin', 'save_pb_mode')\nprint_bad_case_mode = config.getboolean('plugin', 'print_bad_case_mode')\n\nmodel_src = config.get('data', 'model_filepath')\nidx2vec_path = config.get('data', 'idx2vec_filepath')\nword2idx_path = config.get('data', 'word2idx_filepath')\nidx2word_path = config.get('data', 'idx2word_filepath')\nlabel2idx_src = config.get('data', 'label2idx_src')\n\n# 模型可视化\n# writer = tf.summary.FileWriter(\"./model_graph/\" + visual_model_name)\n# writer.add_graph(sess.graph)\n# merged_summary = tf.summary.merge_all()\n# lstm_model.enable_visual(merged_summary)\n\n\n# 模型加载和保存\nfold_model_src_list = ['./save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.4layer_num: 2hidden_num: 500',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.4layer_num: 2hidden_num: 500',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.5layer_num: 2hidden_num: 1000',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.5layer_num: 3hidden_num: 500',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.6layer_num: 2hidden_num: 1000',\n 
'./save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.6layer_num: 2hidden_num: 2000',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.6layer_num: 3hidden_num: 500',\n ]\nlayer_num_patten = re.compile('(?<=layer_num: )[0-9]+')\nhidden_num_patten = re.compile('(?<=hidden_num: )[0-9]+')\n\nfor idx, fold_model_src in enumerate(fold_model_src_list):\n tf.reset_default_graph()\n layer_num = int(re.findall(layer_num_patten, fold_model_src)[0])\n hidden_num = int(int(re.findall(hidden_num_patten, fold_model_src)[0]) / 10)\n\n sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))\n lstm_model = model.Model(sentence_len=sentence_len, learning_rate=0.001, word_vec_size=word_vec_size,\n dropout=1, layer_num=layer_num, hidden_num=hidden_num)\n lstm_model.build(sess)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=int(save_file_num))\n ckpt = tf.train.get_checkpoint_state(fold_model_src)\n\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n if save_pb_mode is True:\n constant_graph = tf.graph_util.convert_variables_to_constants(\n sess,\n sess.graph_def,\n ['predict_result/output_result'],\n variable_names_whitelist=None,\n variable_names_blacklist=None\n )\n\n with tf.gfile.FastGFile('./pb_model/' + str(idx) + '.pb', mode='wb') as f:\n f.write(constant_graph.SerializeToString())\n", "sub_path": "generator_pb.py", "file_name": "generator_pb.py", "file_ext": "py", "file_size_in_byte": 4206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "configparser.RawConfigParser", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 59, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 63, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 64, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 67, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.graph_util.convert_variables_to_constants", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.graph_util", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.FastGFile", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "151473887", "text": "# uncompyle6 version 3.2.3\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 29 2018, 20:59:26) \n# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]\n# Embedded file name: /Users/ivanjut333/PycharmProjects/HipIES/pipeline/peakfinding.py\n# Compiled at: 2015-07-21 
18:12:19\nfrom pylab import *\nfrom scipy import signal\nfrom scipy.ndimage import filters\nfrom hipies import debug\nimport pyqtgraph as pg\nfrom PySide import QtCore\nimport inspect\nmaxfiltercoef = 5\ncwtrange = np.arange(1, 100)\nmaxfiltercoef = 5\ncwtrange = np.arange(3, 100)\ngaussiancentersigma = 2\ngaussianwidthsigma = 5\n\n@debug.timeit\ndef findpeaks(x, y):\n cwtdata = filters.gaussian_filter1d(filters.gaussian_filter1d(signal.cwt(y, signal.ricker, cwtrange), gaussiancentersigma, axis=1), gaussianwidthsigma, axis=0)\n maxima = cwtdata == filters.maximum_filter(cwtdata, 5)\n maximaloc = np.where(maxima == 1)\n x = np.array(x)\n y = np.array(y)\n return list(np.array(np.vstack([x[maximaloc[1]], y[maximaloc[1]], maximaloc])))\n\n\nclass peaktooltip:\n\n def __init__(self, x, y, widget):\n self.q, self.I, self.width, self.index = findpeaks(x, y)\n self.scatterPoints = pg.PlotDataItem(self.q, self.I, size=10, pen=pg.mkPen(None), symbolPen=None, symbolBrush=pg.mkBrush(255, 255, 255, 120), symbol='o')\n self.display_text = pg.TextItem(text='', color=(176, 23, 31), anchor=(0, 1))\n self.display_text.hide()\n widget.addItem(self.scatterPoints)\n widget.addItem(self.display_text)\n self.scatterPoints.scene().sigMouseMoved.connect(self.onMove)\n return\n\n def onMove(self, pixelpos):\n itempos = self.scatterPoints.mapFromScene(pixelpos)\n itemx = itempos.x()\n itemy = itempos.y()\n pixeldelta = 7\n delta = self.scatterPoints.mapFromScene(QtCore.QPointF(pixeldelta + pixelpos.x(), pixeldelta + pixelpos.y()))\n deltax = delta.x() - itemx\n deltay = -(delta.y() - itemy)\n p1 = [ point for point in zip(self.q, self.I) if itemx - deltax < point[0] and point[0] < itemx + deltax and itemy - deltay < point[1] and point[1] < itemy + deltay\n ]\n if len(p1) != 0:\n self.display_text.setText('q=%f\\nI=%f' % (p1[0][0], p1[0][1]))\n self.display_text.setPos(*p1[0])\n self.display_text.show()\n else:\n self.display_text.hide()\n# okay decompiling peakfinding.pyc\n", "sub_path": "pipeline/peakfinding.py", "file_name": "peakfinding.py", "file_ext": "py", "file_size_in_byte": 2438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scipy.ndimage.filters.gaussian_filter1d", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 23, "usage_type": "name"}, {"api_name": "scipy.signal.cwt", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 23, "usage_type": "name"}, {"api_name": "scipy.signal.ricker", "line_number": 23, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.filters.maximum_filter", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 24, "usage_type": "name"}, {"api_name": "hipies.debug.timeit", "line_number": 21, "usage_type": "attribute"}, {"api_name": "hipies.debug", "line_number": 21, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotDataItem", "line_number": 35, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 35, "usage_type": "call"}, {"api_name": "pyqtgraph.mkBrush", "line_number": 35, "usage_type": "call"}, {"api_name": "pyqtgraph.TextItem", "line_number": 36, "usage_type": "call"}, {"api_name": "PySide.QtCore.QPointF", "line_number": 48, "usage_type": "call"}, {"api_name": "PySide.QtCore", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "101339720", "text": "import sys\nimport angr\n\nproject = angr.Project(sys.argv[1], auto_load_libs=False)\n\nfree_map = {}\n\n\nclass 
FreeHandler(angr.SimProcedure):\n    def run(self, ptr):\n        caller_address = hex(self.state.addr)\n        free_ptr = hex(self.state.solver.eval(self.state.regs.rdi))\n        print(\"Free called on: %s\" % (free_ptr))\n        if free_ptr not in free_map:\n            free_map[free_ptr] = caller_address\n        else:\n            print(\n                \"Potential Double Free: %s is trying to free %s, which has already been freed by %s\"\n                % (caller_address, free_ptr, free_map[free_ptr])\n            )\n\n\ndef validate_read(state):\n    region = hex(state.solver.eval(state.inspect.mem_read_address)) # normalize to the hex-string keys stored in free_map\n    if region in free_map:\n        free_call = free_map.get(region)\n        print(\n            \"Potential UAF: %s read from memory freed by %s\"\n            % (region, free_call)\n        )\n\n\ndef validate_write(state):\n    region = hex(state.solver.eval(state.inspect.mem_write_address)) # normalize to the hex-string keys stored in free_map\n    if region in free_map:\n        free_call = free_map.get(region)\n        print(\n            \"Potential UAF: %s wrote to memory freed by %s\"\n            % (region, free_call)\n        )\n\n\nproject.hook_symbol(\"free\", FreeHandler())\n\nsimgr = project.factory.simulation_manager()\ninspector = project.factory.entry_state()\n\ninspector.inspect.b(\"mem_write\", when=angr.BP_AFTER, action=validate_write)\ninspector.inspect.b(\"mem_read\", when=angr.BP_AFTER, action=validate_read)\n\nsimgr.run()\n", "sub_path": "uafinator.py", "file_name": "uafinator.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "angr.Project", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 4, "usage_type": "attribute"}, {"api_name": "angr.SimProcedure", "line_number": 9, "usage_type": "attribute"}, {"api_name": "angr.BP_AFTER", "line_number": 48, "usage_type": "attribute"}, {"api_name": "angr.BP_AFTER", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "142965602", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass MADE(nn.Module):\n    \"\"\"\n    Masked autoencoder for distribution estimation (MADE) as introduced in\n    `MADE: Masked Autoencoder for Distribution Estimation <https://arxiv.org/abs/1502.03509>`_\n    (Germain et al., 2015). It consists of a series of masked linear layers and a given\n    non-linearity between them.\n    \"\"\"\n\n    def __init__(self, *dims, activation=nn.LeakyReLU(), seed=0, permute=False):\n        \"\"\"\n        Initializes a new MADE model as a sequence of masked linear layers.\n\n        Parameters\n        ----------\n        dims: varargs of int\n            Dimensions of input (first), output (last) and hidden layers. At least one hidden layer\n            must be defined, i.e. at least 3 dimensions must be given. The output dimension must be\n            equal to the input dimension or a multiple of it.\n        activation: torch.nn.Module, default: torch.nn.LeakyReLU()\n            An activation function to be used after linear layers (except for the output layer).\n            This module is shared for all hidden layers.\n        seed: int, default: 0\n            A seed to use for initializing the random number generator for constructing random\n            masks for the hidden layers.
If set to `None`, deterministic initialization is used.\n permute: bool, default: False\n Whether to arbitrarily permute the input (permutation is applied deterministically).\n \"\"\"\n super().__init__()\n\n if len(dims) < 3:\n raise ValueError(\"MADE model must have at least one hidden layer\")\n if dims[-1] % dims[0] != 0:\n raise ValueError(\"Output dimension must be multiple of the input dimension\")\n\n self.dims = dims\n\n if seed is None:\n m_layers = _generate_sequential(dims)\n else:\n generator = torch.Generator().manual_seed(seed)\n m_layers = _generate_random_numbers(dims, generator, permute)\n\n layers = []\n for i, (in_dim, out_dim) in enumerate(zip(dims, dims[1:])):\n if i > 0:\n layers.append(activation)\n\n hidden = i < len(dims) - 2\n mask = _generate_mask(m_layers[i], m_layers[i+1], hidden=hidden)\n layers.append(_MaskedLinear(in_dim, out_dim, mask=mask))\n\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n Computes the outputs of the MADE model.\n\n Parameters\n ----------\n x: torch.Tensor [..., D]\n The input (input dimension D).\n\n Returns\n -------\n torch.Tensor [..., E]\n The output (output dimension E).\n \"\"\"\n return self.mlp(x)\n\n\nclass _MaskedLinear(nn.Linear):\n\n def __init__(self, in_features, out_features, mask, bias=True):\n super().__init__(in_features, out_features, bias)\n self.register_buffer('mask', mask)\n\n def forward(self, x):\n return F.linear(x, self.weight * self.mask, self.bias)\n\n def __repr__(self):\n return f'MaskedLinear(in_features={self.in_features}, ' + \\\n f'out_features={self.out_features}, bias={self.bias is not None})'\n\n\ndef _generate_sequential(dims):\n in_dim = dims[0]\n\n degrees = [torch.arange(in_dim)]\n for dim in dims[1:]:\n degrees += [torch.arange(dim) % (in_dim - 1)]\n degrees += [torch.arange(in_dim) % in_dim - 1]\n\n return degrees\n\n\ndef _generate_random_numbers(dims, generator, permute):\n in_dim = dims[0]\n\n samples = []\n # Avoid unconnected units by sampling at least the minimum number of connected neurons in the\n # previous layer\n min_val = 0\n\n # We assign values between 0 and D-2 such that we can simply arange/permute the indices for the\n # input layer\n for i, dim in enumerate(dims[:-1]):\n if i == 0:\n m_vals = torch.randperm(dim, generator=generator) if permute else torch.arange(dim)\n else:\n m_vals = torch.randint(min_val, in_dim-1, size=(dim,), generator=generator)\n min_val = m_vals.min().item()\n samples.append(m_vals)\n\n if dims[-1] > dims[0]:\n samples.append(samples[0].repeat(dims[-1] // dims[0]))\n else:\n samples.append(samples[0])\n\n return samples\n\n\ndef _generate_mask(m_prev, m_next, hidden=True):\n if hidden:\n mask = m_next[None, :] >= m_prev[:, None]\n else: # for output layer\n mask = m_next[None, :] > m_prev[:, None]\n return mask.float().t()\n", "sub_path": "pyblaze/nn/modules/made.py", "file_name": "made.py", "file_ext": "py", "file_size_in_byte": 4498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.Generator", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, 
"usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.functional.linear", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "146026813", "text": "'''\n # @ Author: Zion Deng\n # @ Description: Try to solve the project with MPC, pyomo \n Model description: \n state: x, y, theta, xdot, ydot, thetadot \n ctrl state: F, delta \n m=x(7);\n f(1,1) = x(4); % f\n f(2,1) = x(5); % dy\n f(3,1) = x(6); % d_theta\n f(4,1) = 0.001*(-(rou*Ar*(x(4)^2)/(2*m))-u(2)*(rou*Ag*(x(4)^2)/(2*m))); % df\n f(5,1) = -g-0.001*((rou*Ar*(x(5)^2)/(2*m))-u(2)*(rou*Ag*(x(5)^2)/(2*m))); % ddy\n f(6,1) = 0;\n f(7,1) = 0; % dm\n '''\n\nimport numpy as np \nimport pyomo.environ as pyo \nimport matplotlib.pyplot as plt \n\ndef MPC_reen():\n \"\"\" \n solve with pyomo\n return: feas, xOpt, uOpt \n \"\"\" \n # Constants\n M = 326956\n ROU = 1.1\n A = 100\n g = 10\n GAMMA = 0.1\n L = 70\n J = 1/2*M*L**2 \n K = GAMMA*ROU*A*g / (2*M)\n ST = 46000\n Ar=100\n Ag=36\n\n NX = 7 # number of states\n NU = 1 # number of inputs \n DT = 1 # time interval \n N = 70 # number of total intervals \n INITIAL_STATE = [201364, 102181, -1, 852, -767, 0, 354696] \n DESIRED_STATE = [260000, 20000, -1, 200, -560, 0, 326956.0] \n P = [1e-5, 1e-4, 1, 1e-2, 1e-2, 1, 1e-5] # P matrix for terminal state cost \n FMAX = 1.1 # the max force that engine can provide \n DELTAMAX = 0.1\n m = pyo.ConcreteModel() # pyomo model\n m.tidx = pyo.Set( initialize= range(0,N+1)) # time index \n m.xidx = pyo.Set( initialize= range(0, NX)) # state index \n m.uidx = pyo.Set( initialize= range(0, NU)) # input index \n\n m.x = pyo.Var(m.xidx, m.tidx) # model x[i,t]\n m.u = pyo.Var(m.uidx, m.tidx) # model u[i,t]\n\n # cost function \n m.cost = pyo.Objective(\n expr = sum((P[i] * (m.x[i,t] - DESIRED_STATE[i]))**2 for i in m.xidx for t in range(N-5,N)), \n sense = pyo.minimize \n ) \n # initial state constraints \n m.init_cons = pyo.Constraint(\n m.xidx, \n rule = lambda m, i: m.x[i,0] == INITIAL_STATE[i]\n ) \n # y > 200\n m.height_cons = pyo.Constraint(\n m.tidx,\n rule = lambda m, t: -m.x[1,t] <= 0\n if t < N else pyo.Constraint.Skip\n )\n # 0 {li_item.text}\\n\"\n \n\n # Display de estudios\n estudiostop = \"\"\n estudiostop1 = soup.find_all(\"div\", {\"class\": \"estudios\"})\n for x in estudiostop1:\n estudiostop += f\"=> {x.text}\\n\"\n\n # Leftbar items\n leftbar = \"\"\n leftbar1= soup.findAll(\"div\", {\"class\":\"leftbar\"})\n for left in leftbar1:\n for left_li in left.find_all(\"li\"):\n leftbar += f\"=> {left_li.text}\\n\"\n\n #get and display all social media with its links \n socialmedia= \"\"\n socialmedia1 = soup.find(\"div\", {\"class\": \"social pull-right\"})\n for social in socialmedia1.find_all('a'):\n socialmedia += \"=>\" + social['href']+ '\\n'\n\n\n #Count all \n\n ContadorA = soup.findAll('a')\n\n #### Prints \n print(\"Navegación: \\n\", hrefnav)\n print(\"Items del menú: \\n\", menutop)\n print(\"Items de 
estudios: \\n\", estudiostop)\n print(\"Leftbar items: \\n\", leftbar)\n print(\"Social media links: \\n\" , socialmedia)\n print(\"Contador de : \" , str(len(ContadorA)))\n\nestuds()", "sub_path": "Estudios.py", "file_name": "Estudios.py", "file_ext": "py", "file_size_in_byte": 1575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "408964072", "text": "from datetime import time\nfrom typing import List\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\nfrom queries.project import Project\nfrom queries.vacancy import Vacancy\n\n\ndef create_work_message(vacancies: List[Vacancy]) -> str:\n message = ''\n for vacancy in vacancies:\n message += f'*{vacancy.date}*\\n' \\\n f'*{vacancy.company}*: [{vacancy.title}]({vacancy.link})\\n\\n'\n return message\n\n\ndef create_freelance_message(projects: List[Project]) -> str:\n message = ''\n for project in projects:\n message += f'*{project.date}*\\n' \\\n f'[{project.title}]({project.link})\\n' \\\n f'*{project.offers_count} Предложений*\\n\\n'\n return message\n\n\ndef start_parser(parser):\n data = parser.parse()\n return data\n\n\ndef get_queried_data(search_query, parsers):\n parsers = [parser(search_query) for parser in parsers]\n\n with ThreadPoolExecutor(max_workers=4) as executor:\n data = executor.map(start_parser, parsers)\n\n vacancies = []\n for nested in data:\n for vacancy in nested:\n vacancies.append(vacancy)\n vacancies.sort(key=lambda x: x.date, reverse=True)\n\n return vacancies\n\n\nif __name__ == '__main__':\n pass\n # print(get_vacancies('Trainee'))\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "queries.vacancy.Vacancy", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "queries.project.Project", "line_number": 17, "usage_type": "name"}, {"api_name": "concurrent.futures.thread.ThreadPoolExecutor", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "249489068", "text": "import os, ConfigParser, itertools\n\nclass ConfigFactory( object ):\n\n def __init__(self, conf, **kwargs):\n self.cpath = conf\n self.config = self._readConf( conf ) \n self.iters = self._getIterators() \n self.keys = self.iters[0]\n self.values = self.iters[1]\n\n ###\n # Methods for external use\n ###\n\n def get( self, sec, opt, dft=None ):\n try:\n option = self.config.get( sec, opt )\n try: \n return eval( option )\n except TypeError:\n return option\n except ConfigParser.NoOptionError:\n return dft\n\n def pop( self, sec, opt, dft=None ):\n value = self.get( sec, opt, dft )\n self.config.remove_option( sec, opt ) \n return value\n\n ###\n # Methods for internal use\n ###\n\n def _makeConf( self, keys, values ):\n new_config = ConfigParser.ConfigParser()\n new_config.optionxform = str\n fmap = { k:v for k,v in zip(keys, values) }\n for sec in self.config.sections():\n if sec == 'Iterators': continue\n if sec == 'Template': continue\n new_sec = sec\n if 'Name' in [ opt for opt,_ in self.config.items( sec ) ]:\n new_sec += self.config.get( sec, 'Name' ).format( **fmap )\n if not new_config.has_section( new_sec ):\n 
new_config.add_section( new_sec )\n for opt,val in self.config.items( sec ):\n if opt == 'Name': continue\n new_config.set( new_sec, opt.format( **fmap ), val.format( **fmap ) ) \n return new_config \n\n def _readConf( self, conf ):\n config = ConfigParser.ConfigParser()\n config.optionxform = str\n config.read( conf )\n self.template = config.get( 'Loop', 'Template' ) \n config.read( [self.template, conf] )\n return config\n\n def _makeConfigDir( self ):\n self.ConfigDir = self.config.get( 'Loop', 'ConfigDir' ) \n try:\n os.mkdir( self.ConfigDir )\n except OSError:\n pass\n\n def _getIterators( self ):\n self.nested = self.pop( 'Iterators', 'Nested', False )\n names, vals = self[ 'Iterators' ]\n if self.nested:\n vals = [ x for x in itertools.product( *vals ) ]\n else:\n vals = zip( *vals )\n return names, vals\n \n ###\n # Special Python methods\n ###\n\n def __getitem__(self, key ):\n opts, vals = zip( *self.config.items( key ) )\n return opts, [ eval(v) for v in vals ]\n\n def __iter__( self ):\n self.ind = -1 \n self._makeConfigDir()\n self.tag = self.config.get( 'Loop', 'Tag', 'loop_conf' )\n return self\n\n def next( self ):\n self.ind += 1\n if self.ind >= len( self.values ):\n raise StopIteration\n conf = self._makeConf( self.keys, self.values[self.ind] ) \n conf_path = os.path.join( self.ConfigDir, '%s.%d.conf' %(self.tag, self.ind) )\n with open( conf_path, 'wb' ) as f:\n conf.write( f )\n return conf\n", "sub_path": "Tools/ConfigFactory.py", "file_name": "ConfigFactory.py", "file_ext": "py", "file_size_in_byte": 3134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ConfigParser.NoOptionError", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 36, "usage_type": "call"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 53, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}]} +{"seq_id": "347454840", "text": "# Author : Ttatanepvp123\n# Github : https://github.com/ttatanepvp123\n# License : GPL-3 ( https://www.gnu.org/licenses/gpl-3.0.en.html )\nimport _thread\nimport tempfile\nimport os\nimport time\nimport requests\nimport utils\nimport random\n\nclass indexer():\n # EVENTS\n def onReady(self):\n pass\n def onRequests(self, r : requests.Response):\n pass\n def onError(self, e):\n pass\n\n def addLinks(self, links):\n while self.linksFileIsOpen:\n time.sleep(0.005)\n self.linksFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\", \"a+\") as fp:\n for currentLink in links:\n fp.write(f\"{currentLink}\\n\")\n self.linksFileIsOpen = False\n\n def getLink(self):\n while self.linksFileIsOpen:\n time.sleep(0.005)\n self.linksFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\", \"r\") as fp:#get first link\n link = fp.readline()\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.tmp\", \"w\") as fp:#delete first link (without load file)\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\", \"r\") as fpp:\n for currentLine in fpp:\n if currentLine != link:\n fp.write(currentLine)\n os.remove(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\")\n 
os.rename(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.tmp\", f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\")\n self.linksFileIsOpen = False\n self.linksNumber -= 1\n return link.replace(\"\\n\",\"\")\n \n def addLinkChecked(self, link):\n while self.linksCheckedFileIsOpen:\n time.sleep(0.005)\n self.linksCheckedFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/checked.txt\", \"a+\") as fp:\n fp.write(f\"{link}\\n\")\n self.linksCheckedFileIsOpen = False\n\n def isChecked(self, link):\n returner = False\n while self.linksCheckedFileIsOpen:\n time.sleep(0.005)\n self.linksCheckedFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/checked.txt\", \"r\") as fp:\n for currentLink in fp:\n if currentLink.replace(\"\\n\",\"\") == link:\n returner = True\n break\n self.linksCheckedFileIsOpen = False\n return returner\n\n def worker(self, link):\n try:\n if self.isChecked(link):\n self.threadStarted -= 1\n return\n else:\n self.addLinkChecked(link)\n r = requests.get(link, headers=self.headers, timeout=self.timeout)\n self.onRequests(r)\n links = self.getAllLinks(r)\n self.addLinks(links)\n self.linksNumber += len(links)\n except Exception as e:\n self.onError(e)\n self.threadStarted -= 1\n\n def __init__(self, url, threadNumber=5, headers={\"User-Agent\":\"CookieBot/0.2 (+https://slackercompany.ml/CookieBot/)\"}, timeout=10):\n self.getAllLinks = utils.getAllLinks\n self.linksFileIsOpen = False\n self.linksCheckedFileIsOpen = False\n self.threadStarted = 0\n self.headers = headers\n self.timeout = timeout\n self.numberInstance = random.randint(0,99999999)\n os.mkdir(f\"{tempfile.gettempdir()}/{self.numberInstance}/\")\n self.addLinkChecked(\"debug\")\n self.addLinks([url])\n self.linksNumber = 1\n fakeDoWhile = False\n self.onReady()\n while True:\n time.sleep(0.0025)\n if self.threadStarted < threadNumber and self.linksNumber > 0:\n fakeDoWhile = True\n self.threadStarted += 1\n _thread.start_new_thread(self.worker, (self.getLink(),))\n elif fakeDoWhile and self.threadStarted == 0 and self.linksNumber == 0:\n break", "sub_path": "indexer.py", "file_name": "indexer.py", "file_ext": "py", "file_size_in_byte": 4023, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.Response", "line_number": 16, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 34, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 36, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 41, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 42, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 60, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 75, "usage_type": "call"}, {"api_name": 
"utils.getAllLinks", "line_number": 85, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 91, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 92, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 92, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "552854359", "text": "import matplotlib.pyplot as plt \r\nimport numpy as np\r\n\r\nx=np.linspace(-3,3,50)\r\ny1=2*x+1\r\ny2=x**2\r\n\r\nplt.figure()\r\nplt.plot(x,y1)#第一张图\r\n\r\nplt.figure(figsize=(5,5))\r\nplt.plot(x,y2)#第二张图\r\nplt.plot(x,y1,color='red',linewidth=1.0,linestyle='--')#两条线画一个figure里面\r\nplt.show()", "sub_path": "Matplotlib_ex/plot_ex_2.py", "file_name": "plot_ex_2.py", "file_ext": "py", "file_size_in_byte": 300, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.linspace", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "630181633", "text": "from datetime import datetime\nfrom flask_restful import fields\nfrom blueprints import db\n\nclass Places(db.Model):\n __tablename__ = \"places\"\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(255), nullable=False)\n description = db.Column(db.Text, nullable=False)\n place_type = db.Column(db.String(255), nullable=False)\n primary_image = db.Column(db.Text, nullable=False)\n street = db.Column(db.String(255), nullable=False)\n city = db.Column(db.String(255), nullable=False)\n country = db.Column(db.String(255), nullable=False)\n longitude = db.Column(db.Float, nullable=False)\n latitude = db.Column(db.Float, nullable=False)\n deleted = db.Column(db.Boolean, default=False)\n created_at = db.Column(db.DateTime, default=datetime.now())\n updated_at = db.Column(db.DateTime, onupdate=datetime.now())\n\n response_fields = {\n \"id\": fields.Integer,\n \"name\": fields.String,\n \"description\": fields.String,\n \"place_type\": fields.String,\n \"primary_image\": fields.String,\n \"street\": fields.String,\n \"city\": fields.String,\n \"country\": fields.String,\n \"longitude\": fields.Float,\n \"latitude\": fields.Float,\n \"deleted\": fields.Boolean,\n \"created_at\": fields.DateTime,\n \"updated_at\": fields.DateTime\n }\n\n def __init__(\n self,\n name,\n description,\n place_type,\n primary_image,\n street,\n city,\n country,\n longitude,\n latitude\n ):\n self.name = name\n self.description = description\n self.place_type = 
place_type\n self.primary_image = primary_image\n self.street = street\n self.city = city\n self.country = country\n self.longitude = longitude\n self.latitude = latitude\n\n def __repr__(self):\n return \"\" % self.id\n", "sub_path": "blueprints/places/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "blueprints.db.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "blueprints.db", "line_number": 5, "usage_type": "name"}, {"api_name": "blueprints.db.Column", "line_number": 7, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 7, "usage_type": "name"}, {"api_name": "blueprints.db.Integer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 8, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 8, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 9, "usage_type": "name"}, {"api_name": "blueprints.db.Text", "line_number": 9, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 10, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 10, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 11, "usage_type": "name"}, {"api_name": "blueprints.db.Text", "line_number": 11, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 12, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 12, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 13, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 13, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 14, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 14, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 15, "usage_type": "name"}, {"api_name": "blueprints.db.Float", "line_number": 15, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 16, "usage_type": "name"}, {"api_name": "blueprints.db.Float", "line_number": 16, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 17, "usage_type": "name"}, {"api_name": "blueprints.db.Boolean", "line_number": 17, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 18, "usage_type": "name"}, {"api_name": "blueprints.db.DateTime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": 
"blueprints.db.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 19, "usage_type": "name"}, {"api_name": "blueprints.db.DateTime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_restful.fields.Integer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 28, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "flask_restful.fields.Float", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 30, "usage_type": "name"}, {"api_name": "flask_restful.fields.Float", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_restful.fields.Boolean", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 32, "usage_type": "name"}, {"api_name": "flask_restful.fields.DateTime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 33, "usage_type": "name"}, {"api_name": "flask_restful.fields.DateTime", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "400495010", "text": "import nltk\nimport string\nimport random\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity,euclidean_distances\nfrom nltk.stem import PorterStemmer\nimport operator\nfrom collections import Counter\nimport re\nimport math\nimport re\nimport gensim\nfrom gensim.parsing.preprocessing import remove_stopwords\nimport pandas as pd\nfrom gensim import corpora\nfrom sklearn.metrics.pairwise import cosine_similarity,euclidean_distances,manhattan_distances\nimport numpy as np\nfrom nltk.stem import WordNetLemmatizer\n \nwnl = WordNetLemmatizer()\n\n\ndf=pd.read_csv('chatbot/Faqs_pdeu.csv')\ndf.head()\n\ndef get_euclid(a,b):\n return math.sqrt(sum((a[k] - b[k])**2 for k in set(a.keys()).intersection(set(b.keys()))))\ndef get_man(a,b):\n return (sum((a[k] - b[k])**2 for k in set(a.keys()).intersection(set(b.keys()))))\ndef get_cosine(vec1, vec2):\n 
intersection = set(vec1.keys()) & set(vec2.keys())\n    numerator = sum([vec1[x] * vec2[x] for x in intersection])\n\n    sum1 = sum([vec1[x] ** 2 for x in list(vec1.keys())])\n    sum2 = sum([vec2[x] ** 2 for x in list(vec2.keys())])\n    denominator = math.sqrt(sum1) * math.sqrt(sum2)\n\n    if not denominator:\n        return 0.0\n    else:\n        return float(numerator) / denominator\n\ndef text_to_vector(text):\n    words = WORD.findall(text)\n    return Counter(words)\n\n\nWORD = re.compile(r\"\\w+\")\nf=open('chatbot/pdeu.txt','r',encoding='utf-8',errors='ignore')\nraw=f.read()\nraw=raw.lower()\n#print(raw)\nsent_tokens=nltk.sent_tokenize(raw)\n# print(sent_tokens)\nsent_tokens=[x.replace('\\n','') for x in sent_tokens]\n#print('------sent_tokens-----')\n#print(sent_tokens)\n\nword_tokens=nltk.word_tokenize(raw)\nlemmer=nltk.stem.WordNetLemmatizer()\n#print(sent_tokens)\n#print(len(sent_tokens))\n\ndef lemmatize(tokens):\n    return [lemmer.lemmatize(token) for token in tokens]\nremove_punct_dict=dict((ord(punct),None) for punct in string.punctuation)\ndef normalize(text):\n    return lemmatize(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))\n\n\ndef greet(sent):\n    greet_resp=[\"hello welcome!!\",\"hi how are you?\",\"Pleasure to hear from you!!!\",\"Hello sir\",\"nice to meet you sir!!!\",\"What can I do for you?\"]\n    greet_inp=['hii','heyaa','hello','hey there',\"hi\",\"hey\",\"hello\",\"howdy\",\"how are you?\"]\n    if sent in [\"good morning\",\"good afternoon\",\"good evening\"]:\n        return f\"hello , {sent}\"\n    if sent==\"good night\":\n        return \"good night\"\n\n    if(sent and sent[-1]=='?'):\n        sent=sent[:-1]\n    ps = PorterStemmer()\n    arr=sent.split(' ')\n    arr=[ps.stem(i) for i in arr]\n    print('\\n\\n----------------------------------',arr,'\\n\\n')\n    if('see' in arr and 'you' in arr):\n        return 'Talk to you Later'\n    elif 'goodby' in arr or 'bye' in arr:\n        return 'Good Bye :)'\n    elif 'accredit' in arr and 'colleg' in arr:\n        return 'Yes'\n    elif 'instal' in arr and 'fee' in arr and 'pay' in arr:\n        return 'Yes You can pay fees in two installments'\n    elif 'hour' in arr and ('work' in arr or 'oper' in arr):\n        return 'We are open 9:00am-4:00pm Monday-friday!'\n    elif ('field' in arr or 'branch' in arr) and 'different' in arr and 'colleg' in arr:\n        return '\"Petroleum Technology-120,Mechanical Engineering-120,Electrical Engineering-120,Civil Engineering-120,Chemical Engineering-120,Computer Science-60,Information and Communication Technology-60\".'\n    elif ('cse' in arr or 'mechan' in arr or 'chemica' in arr or 'electr' in arr or 'comput' in arr or 'scienc' in arr or 'inform' in arr or 'commun' in arr or 'technolg' in arr or 'petroleum' in arr) and 'subject' in arr:\n        return 'You can check all this course related information from our website !'\n    elif 'payment' in arr and 'fee' in arr and 'avail' in arr:\n        return 'cheque,debit card,netbanking,credit card are acceptable. NEFT is preferable'\n    elif 'is' in arr and 'transport' in arr and 'avail' in arr:\n        return 'Yes , bus service is available.'\n    elif 'hostel' in arr and 'facil' in arr and 'avail' in arr:\n        return 'Yes! we provide telephone , internet , AC , first-aid , reading , dining , security all this facility in hostel'
\n    elif 'transport' in arr and 'fee' in arr:\n        return 'transportation fees of our college is 10500 per semester'\n    elif 'semest' in arr and 'fee' in arr:\n        return 'fees of our college is 110000 per semester!'\n    elif 'chairman' in arr and 'who' in arr and 'colleg' in arr:\n        return 'Mukesh Ambani is chairman of our college'\n    elif 'is' in arr and 'under' in arr and 'gtu' in arr:\n        return 'No, our college doesnt come under GTU.'\n    elif 'scholarship' in arr and 'criteria' in arr:\n        return 'you can check out at :: https://www.pdpu.ac.in/downloads/Financial%20Assistance%202019.pdf'\n\n    for word in sent.split():\n        if word.lower() in greet_inp:\n            return random.choice(greet_resp)\n    return None\n\n#Searching in file\n# Response for searching in file using TF-IDF\ndef resp(user_inp):\n    ans,ind,hue=[],[],3\n    tfidvec=TfidfVectorizer(tokenizer=normalize,stop_words='english')\n    tfid=tfidvec.fit_transform(sent_tokens+[user_inp]) # include the query so tfid[-1] embeds the user input\n\n    vals=cosine_similarity(tfid[-1],tfid[:-1]) # compare the query against the corpus only\n    d={}\n    for i in range(0,len(vals[0])):\n    \td[i]=vals[0][i]\n    sorted_d = dict( sorted(d.items(), key=operator.itemgetter(1),reverse=True))\n    for (key,val) in sorted_d.items():\n    \tif(hue>0 and val>0):\n    \t\tind.append(key)\n    \telse:\n    \t\tbreak\n    \thue-=1\n    flat=vals.flatten()\n    \n    flat=sorted(flat,reverse=True)\n    req_tfid=flat[0]\n    if(req_tfid==0):\n        ans.append(\"I am sorry! I don't understand you\")\n    else:\n        for index in ind: \n            ans.append(sent_tokens[index])\n    ans1=''\n    for statements in ans:\n        ans1=ans1+str(statements)\n        ans1+='\\n'\n    return ans1\n\ndef clean_sent(sent,stopwords=False):\n    sent=sent.lower().strip()\n    sent=re.sub(r'[^a-z0-9\\s]','',sent)\n    if stopwords:\n        sent=remove_stopwords(sent)\n    return sent \n\ndef get_clean_sent(df,stopwords=False):\n    sents=df[['Questions']]\n    cleaned_sent=[]\n    for index,row in df.iterrows():\n        cleaned=clean_sent(row['Questions'],stopwords)\n        cleaned=cleaned.lower()\n        cleaned_sent.append(\" \".join([wnl.lemmatize(i) for i in cleaned.split()]))\n    return cleaned_sent\n\n#Glove model\ndef getwordvec(word,model):\n    samp=model['computer']\n    sample_len=len(samp)\n    vec=[0]*sample_len\n    try:\n        vec=model[word]\n    except:\n        vec=[0]*sample_len\n    return vec\n\ndef getphrase(phrase,embeddingmodel):\n    samp=getwordvec('computer',embeddingmodel)\n    vec=np.array([0]*len(samp))\n    den=0\n    for word in phrase.split():\n        den+=1\n        vec=vec+np.array(getwordvec(word,embeddingmodel))\n    return vec.reshape(1,-1)\n\ndef glove(question,cleaned_sent,param):\n    google_model=gensim.models.KeyedVectors.load('chatbot/w2vecmodel.mod')\n    sent_embedings=[]\n    try_flag=False\n    for sent in cleaned_sent:\n        sent_embedings.append(getphrase(sent,google_model))\n    ques_em=getphrase(question,google_model)\n    max_sim=-1\n    index_sim=-1\n    try:\n        for index,faq_em in enumerate(sent_embedings):\n            if(param=='cosine'):\n                sim=cosine_similarity(faq_em,ques_em)[0][0]\n            if(param=='euclid'):\n                sim=euclidean_distances(faq_em,ques_em)[0][0]\n            if(param=='man'):\n                sim=manhattan_distances(faq_em,ques_em)[0][0] \n            if(sim>max_sim):\n                max_sim=sim\n                index_sim=index\n        try_flag=True\n        ans=df.iloc[index_sim,1]\n        return ans,try_flag\n    except Exception as e:\n        return 0,try_flag\n\n\n#Response for bagofwords approach\ndef resp1(ques,param):\n    cleaned_sent=get_clean_sent(df,stopwords=True)\n    sentences=cleaned_sent\n    sent_words=[[wrd for wrd in document.split()]for document in sentences]\n    dictionary=corpora.Dictionary(sent_words)\n    bow_corpus=[dictionary.doc2bow(text) for text in 
sent_words]\n ques=clean_sent(ques,stopwords=True)\n #print(ques)\n ques_em=dictionary.doc2bow(ques.split())\n #print(ques_em)\n ans,try_flag=glove(ques,cleaned_sent,param)\n #print('Returned ans :: ',ans)\n #print('try_flag :: ',try_flag)\n if try_flag:\n return ans\n return retrieve(ques_em,bow_corpus,df,sentences,ques,param)\n\n\ndef retrieve(ques_em,sent_em,df,sent,user_inp,param):\n max_sim=-1\n index_sim=-1\n try:\n for index,faq_em in enumerate(sent_em):\n if(param=='cosine'):\n sim=cosine_similarity(faq_em,ques_em)[0][0]\n if(param=='euclid'):\n sim=euclidean_distances(faq_em,ques_em)[0][0]\n if(param=='man'):\n sim=manhattan_distances(faq_em,ques_em)[0][0] \n if(sim>max_sim):\n max_sim=sim\n index_sim=index\n ans3=df.iloc[index_sim,1]\n return ans3\n except Exception as e:\n pass\n ans1=resp(user_inp)\n ans2=search_google(user_inp)\n cos1,cos2=0,0\n inp=text_to_vector(user_inp)\n cos1=get_cosine(inp,text_to_vector(ans1))\n cos2=get_cosine(inp,text_to_vector(ans2))\n if(cos1>=cos2):\n return ans1\n return ans2\n\ndef get_bot_resp(user_inp,param):\n flag=False\n while(1):\n ans=greet(user_inp.lower())\n print(\"got ans for query\",ans,user_inp)\n if(user_inp=='what are branches in sot'):\n ans=\"Following are the branches : Electrical,Chemical,Mechanical,Civil,Computer,ICT\"\n flag=True\n return ans,flag\n if(user_inp=='is there hostel facility in pdeu'):\n ans=\"Yes there is hostel facility in pdeu\"\n flag=True\n return ans,flag\n if(user_inp=='average fee per year'):\n ans='Average Fees 2,43,250 ruppes per year'\n flag=True\n return ans,flag\n if(ans!=None):\n flag=True\n return ans,flag\n return resp1(user_inp.lower(),param),flag\n\n\n\n", "sub_path": "chatbot/chatbot.py", "file_name": "chatbot.py", "file_ext": "py", "file_size_in_byte": 9965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 48, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 53, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 60, "usage_type": "call"}, {"api_name": "nltk.stem", "line_number": 60, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 66, "usage_type": "attribute"}, {"api_name": "nltk.word_tokenize", "line_number": 68, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 81, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 128, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 132, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 156, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing.remove_stopwords", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, 
"usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors.load", "line_number": 191, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 191, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.euclidean_distances", "line_number": 204, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.manhattan_distances", "line_number": 206, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 222, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 222, "usage_type": "name"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 242, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.euclidean_distances", "line_number": 244, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.manhattan_distances", "line_number": 246, "usage_type": "call"}]} +{"seq_id": "285238353", "text": "import time\nimport requests\nfrom pkg_resources import parse_version\n\nimport click\nimport gevent\nfrom gevent.event import AsyncResult\nfrom gevent.queue import Queue\nimport structlog\n\nfrom raiden.exceptions import RaidenShuttingDown\nfrom raiden.utils import get_system_spec\n\nCHECK_VERSION_INTERVAL = 3 * 60 * 60\nLATEST = 'https://api.github.com/repos/raiden-network/raiden/releases/latest'\nRELEASE_PAGE = 'https://github.com/raiden-network/raiden/releases'\n\nREMOVE_CALLBACK = object()\nlog = structlog.get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef check_version():\n \"\"\"Check every 3h for a new release\"\"\"\n app_version = parse_version(get_system_spec()['raiden'])\n while True:\n try:\n content = requests.get(LATEST).json()\n # getting the latest release version\n latest_release = parse_version(content['tag_name'])\n # comparing it to the user's application\n if app_version < latest_release:\n msg = \"You're running version {}. The latest version is {}\".format(\n app_version,\n latest_release,\n )\n click.secho(msg, fg='red')\n click.secho(\"It's time to update! Releases: {}\".format(RELEASE_PAGE), fg='red')\n except requests.exceptions.HTTPError as herr:\n click.secho('Error while checking for version', fg='red')\n print(herr)\n except ValueError as verr:\n click.secho('Error while checking the version', fg='red')\n print(verr)\n finally:\n # repeat the process once every 3h\n gevent.sleep(CHECK_VERSION_INTERVAL)\n\n\nclass AlarmTask(gevent.Greenlet):\n \"\"\" Task to notify when a block is mined. 
\"\"\"\n\n def __init__(self, chain):\n super().__init__()\n self.callbacks = list()\n self.stop_event = AsyncResult()\n self.chain = chain\n self.last_block_number = None\n self.response_queue = Queue()\n\n # TODO: Start with a larger wait_time and decrease it as the\n # probability of a new block increases.\n self.wait_time = 0.5\n self.last_loop = time.time()\n\n def register_callback(self, callback):\n \"\"\" Register a new callback.\n\n Note:\n The callback will be executed in the AlarmTask context and for\n this reason it should not block, otherwise we can miss block\n changes.\n \"\"\"\n if not callable(callback):\n raise ValueError('callback is not a callable')\n\n self.callbacks.append(callback)\n\n def remove_callback(self, callback):\n \"\"\"Remove callback from the list of callbacks if it exists\"\"\"\n if callback in self.callbacks:\n self.callbacks.remove(callback)\n\n def _run(self): # pylint: disable=method-hidden\n self.last_block_number = self.chain.block_number()\n log.debug('starting block number', block_number=self.last_block_number)\n\n sleep_time = 0\n while self.stop_event.wait(sleep_time) is not True:\n try:\n self.poll_for_new_block()\n except RaidenShuttingDown:\n break\n\n # we want this task to iterate in the tick of `wait_time`, so take\n # into account how long we spent executing one tick.\n self.last_loop = time.time()\n work_time = self.last_loop - self.last_loop\n if work_time > self.wait_time:\n log.warning(\n 'alarm loop is taking longer than the wait time',\n work_time=work_time,\n wait_time=self.wait_time,\n )\n sleep_time = 0.001\n else:\n sleep_time = self.wait_time - work_time\n\n # stopping\n self.callbacks = list()\n\n def poll_for_new_block(self):\n chain_id = self.chain.network_id\n current_block = self.chain.block_number()\n\n if current_block > self.last_block_number + 1:\n difference = current_block - self.last_block_number - 1\n log.error('alarm missed %s blocks' % (difference), current_block=current_block)\n\n if current_block != self.last_block_number:\n log.debug(\n 'new block',\n number=current_block,\n timestamp=self.last_loop,\n )\n\n self.last_block_number = current_block\n remove = list()\n for callback in self.callbacks:\n result = callback(current_block, chain_id)\n if result is REMOVE_CALLBACK:\n remove.append(callback)\n\n for callback in remove:\n self.callbacks.remove(callback)\n\n def stop_async(self):\n self.stop_event.set(True)\n", "sub_path": "raiden/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 4787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "structlog.get_logger", "line_number": 19, "usage_type": "call"}, {"api_name": "pkg_resources.parse_version", "line_number": 24, "usage_type": "call"}, {"api_name": "raiden.utils.get_system_spec", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "pkg_resources.parse_version", "line_number": 29, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 36, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 38, "usage_type": "attribute"}, {"api_name": "click.secho", "line_number": 39, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 42, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "gevent.Greenlet", "line_number": 49, 
"usage_type": "attribute"}, {"api_name": "gevent.event.AsyncResult", "line_number": 55, "usage_type": "call"}, {"api_name": "gevent.queue.Queue", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "raiden.exceptions.RaidenShuttingDown", "line_number": 91, "usage_type": "name"}, {"api_name": "time.time", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "128229949", "text": "# Copyright 2021 Open Robotics (2021)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom launch import LaunchDescription\nfrom launch_ros.actions import Node\nfrom launch.actions import ExecuteProcess, IncludeLaunchDescription, RegisterEventHandler\nfrom launch.event_handlers import OnProcessExit\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\n\nimport xacro\nimport yaml\n\n\ndef load_file(package_name, file_path):\n package_path = get_package_share_directory(package_name)\n absolute_file_path = os.path.join(package_path, file_path)\n try:\n with open(absolute_file_path, 'r') as file:\n return file.read()\n except EnvironmentError:\n # parent of IOError, OSError *and* WindowsError where available\n return None\n\n\ndef load_yaml(package_name, file_path):\n package_path = get_package_share_directory(package_name)\n absolute_file_path = os.path.join(package_path, file_path)\n try:\n with open(absolute_file_path, 'r') as file:\n return yaml.safe_load(file)\n except EnvironmentError:\n # parent of IOError, OSError *and* WindowsError where available\n return None\n\n\ndef generate_launch_description():\n # moveit_cpp.yaml is passed by filename for now since it's node specific\n ur10_gazebo = os.path.join(\n get_package_share_directory('ur10_gazebo'),\n 'worlds',\n 'ur10.world')\n\n gazebo = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([os.path.join(\n get_package_share_directory('gazebo_ros'), 'launch'), '/gazebo.launch.py']),\n launch_arguments={'world': ur10_gazebo}.items(),\n )\n\n ur10_description_path = os.path.join(\n get_package_share_directory('ur10_description'))\n\n xacro_file = os.path.join(ur10_description_path,\n 'urdf',\n 'ur10_robot.urdf.xacro')\n\n doc = xacro.parse(open(xacro_file))\n xacro.process_doc(doc)\n robot_description_config = doc.toxml()\n robot_description = {'robot_description': robot_description_config}\n\n node_robot_state_publisher = Node(\n package='robot_state_publisher',\n executable='robot_state_publisher',\n output='screen',\n parameters=[robot_description]\n )\n\n spawn_entity = Node(package='gazebo_ros', executable='spawn_entity.py',\n arguments=['-topic', 'robot_description',\n '-entity', 'ur10'],\n output='screen')\n\n load_joint_state_controller = ExecuteProcess(\n cmd=['ros2', 'control', 'load_start_controller', 'joint_state_controller'],\n output='screen'\n )\n\n load_joint_trajectory_controller = ExecuteProcess(\n cmd=['ros2', 'control', 'load_start_controller', 
'joint_trajectory_controller'],\n output='screen'\n )\n\n # Static TF\n #static_tf = Node(package='tf2_ros',\n # executable='static_transform_publisher',\n # name='static_transform_publisher',\n # output='log',\n # arguments=['0.0', '0.0', '0.65', '0.0', '0.0', '0.0', 'world', 'ur_base'])\n\n return LaunchDescription([\n RegisterEventHandler(\n event_handler=OnProcessExit(\n target_action=spawn_entity,\n on_exit=[load_joint_state_controller],\n )\n ),\n RegisterEventHandler(\n event_handler=OnProcessExit(\n target_action=load_joint_state_controller,\n on_exit=[load_joint_trajectory_controller],\n )\n ),\n gazebo,\n node_robot_state_publisher,\n #static_tf,\n spawn_entity\n ])\n", "sub_path": "ur10_gazebo/launch/ur10_gazebo.launch.py", "file_name": "ur10_gazebo.launch.py", "file_ext": "py", "file_size_in_byte": 4286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 54, "usage_type": "call"}, {"api_name": "launch.actions.IncludeLaunchDescription", "line_number": 58, "usage_type": "call"}, {"api_name": "launch.launch_description_sources.PythonLaunchDescriptionSource", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "xacro.parse", "line_number": 71, "usage_type": "call"}, {"api_name": "xacro.process_doc", "line_number": 72, "usage_type": "call"}, {"api_name": "launch_ros.actions.Node", "line_number": 76, "usage_type": "call"}, {"api_name": "launch_ros.actions.Node", "line_number": 83, "usage_type": "call"}, {"api_name": "launch.actions.ExecuteProcess", "line_number": 88, "usage_type": "call"}, {"api_name": "launch.actions.ExecuteProcess", "line_number": 93, "usage_type": "call"}, {"api_name": "launch.LaunchDescription", "line_number": 105, "usage_type": "call"}, {"api_name": "launch.actions.RegisterEventHandler", "line_number": 106, "usage_type": "call"}, {"api_name": "launch.event_handlers.OnProcessExit", "line_number": 107, "usage_type": "call"}, {"api_name": "launch.actions.RegisterEventHandler", "line_number": 112, "usage_type": "call"}, {"api_name": 
"launch.event_handlers.OnProcessExit", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "367111044", "text": "from numpy import array\nfrom keras.models import Sequential\nfrom keras.layers import Dense,LSTM\n\n\n#1. 데이터\nx=array([[1,2,3],[2,3,4],[3,4,5],[4,5,6]]) #4행 3열\ny=array([4,5,6,7]) #(4,)--->스칼라가 4개짜리 벡터1개 (4,)!=(4,1) 절대 (4,1)이라고 하면 안된다. input_dim=1(일차원)\n# y2=array([[4,5,6,7]]) #(1,4)\n# y3=array([[4],[5],[6],[7]]) #(4,1)\n\nprint(\"x.shape:\",x.shape) #(4,3)\nprint(\"y.shape:\",y.shape) #(4,) --->(4,1)이라고 하면 에러 난다. \n#shape해서 확인해보기!!\n#자르는 숫자 명시 ex)4x3x1--> (4,3)을 1개씩 연속된 데이터 계산하겠다(1개씩 작업) (행, 열, 몇개로 자를건지)\n\n# x=x.reshape(4,3,1) #전체 데이터는 변경되지 않는다. \n# reshape할 때 검사는 곱하기! (4*3)=(4*3*1)\nx=x.reshape(x.shape[0],x.shape[1],1) # x.shape[0]=4, x.shape[1]=3 \n#위에 식이나 아래식이나 결과는 동일하나 정석은 두번째꺼가 맞는 것!\n\nprint(\"x:\",x.shape)\nprint(\"x:\",x)\n\n#2. 모델구성\n# LSTM은 DENSE모델에 비해 많은 연산을 하게 된다. \nmodel=Sequential()\nmodel.add(LSTM(10,activation='relu',input_shape=(3,1))) #시계열 input_shape=(3,1) ***행 무시***, LSTM에서 중요한 것: 컬럼의 개수와 몇개씩 잘라서 계산할 것이냐, 행은 중요하지 않다\n#여기서부터는 Dense모델\nmodel.add(Dense(5))\nmodel.add(Dense(15))\nmodel.add(Dense(15))\nmodel.add(Dense(15))\nmodel.add(Dense(50))\nmodel.add(Dense(15))\nmodel.add(Dense(15))\nmodel.add(Dense(1)) #하나 예측 y=[4,5,6,7]\n\nmodel.summary() #param[1]=480\n# 이유: 1*10-->input/ 1*10--->bias/10*10--->역전파------->합쳐서 100+10+10=120////120*4=480\n\n\"\"\"\n#과제1\n#param 이 왜 480나오는 지 찾아보기\n#input_shape는 (3,1)밖�� 안들어갔는데 왜 480이 나올까\n\"\"\"\n#3. 실행\nmodel.compile(optimizer='adam',loss='mse') #metrics하나 안하나 상관없다.\nmodel.fit(x,y,epochs=300,batch_size=1)\n\nx_input=array([5,6,7]) #(3,) 와꾸가 안맞음--->(1,3,1)로 변환 (행, 열, 몇개로 쪼갤건지)\nx_input=x_input.reshape(1,3,1)\nprint(x_input)\n\nyhat=model.predict(x_input)\nprint(yhat)\n##정확하게 예측이 안된다. 
LSTM너무 적어서 , 수정할 수 있는 부분 수정\n\n\n\n#예제\n# x=array([[1,2,3],[1,2,3]]) #(2,3)\n# print(x.shape)\n# y=array([[[1,2],[4,2]],[[4,5],[5,6]]]) #(덩어리 개수, 개수, 제일 작은 단위) #작은거부터 치고 올라가기\n# print(y.shape)\n# z=array([[[1],[2],[3]],[[4],[5],[6]]])\n# print(z.shape)\n\n# w=array([[[1,2,3,4]]])\n# print(w.shape)\n# k=array([[[[1],[2]]],[[[3],[4]]]])\n# print(k.shape)\n###스칼라 벡터 행렬 텐서\n\n", "sub_path": "keras/keras29_lstm.py", "file_name": "keras29_lstm.py", "file_ext": "py", "file_size_in_byte": 2606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "323144281", "text": "######################################################################\n######### DNA ALIQUOTING FOR 2 CRYO VIAL TUBE PLATES #################\n######################################################################\nfrom opentrons import protocol_api\nimport pandas as pd\nimport numpy as np\nimport os\n\n# metadata\nmetadata = {\n 'protocolName': 'DNA ALIQUOTING',\n 'author': 'Name ',\n 'description': 'DNA ALIQUOTING for Opentrons',\n 'apiLevel': '2.10'\n}\nwater_run = False\npositions = ['A1','A2','A3','A4','A5','A6','A7','A8','A9','A10','A11','A12']\n# positions = ['A1','A2','A3']\nDNA_volume = 200.0\n\n# protocol run function. 
the part after the colon lets your editor know\n# where to look for autocomplete suggestions\ndef run(protocol: protocol_api.ProtocolContext):\n #### DEFINE LABWARE\n #### TIP RACKS\n tip_rack1_300ul = protocol.load_labware('opentrons_96_tiprack_300ul', '1')\n\n #### PLATES\n dna_source_plate = protocol.load_labware('nest_96_wellplate_2ml_deep_on_chemagic_stand', '2')\n cryo_vial_tubes_1 = protocol.load_labware('nunc_cryo_vial_tubes', '5')\n cryo_vial_tubes_2 = protocol.load_labware('nunc_cryo_vial_tubes', '8')\n\n #### PIPETTES\n right_pipette = protocol.load_instrument(\n 'p300_multi_gen2', mount='right', tip_racks=[tip_rack1_300ul])\n\n def dna_aliquot():\n i = 0\n for j in range(len(positions)):\n comment = 'COLUMN NO: ' + str(i+1)\n protocol.comment(comment)\n right_pipette.pick_up_tip()\n # 1ST ALIQUOT\n right_pipette.aspirate(volume=DNA_volume,\n location=dna_source_plate[positions[i]].bottom(2),\n rate=0.3)\n protocol.delay(seconds=2)\n right_pipette.touch_tip()\n right_pipette.air_gap(2)\n\n right_pipette.dispense(volume=DNA_volume,\n location=cryo_vial_tubes_1[positions[i]].bottom(4),\n rate=0.3)\n right_pipette.blow_out(location=cryo_vial_tubes_1[positions[i]].bottom(9))\n protocol.delay(seconds=1)\n\n # 2ND ALIQUOT\n right_pipette.aspirate(volume=DNA_volume,\n location=dna_source_plate[positions[i]].bottom(1.25),\n rate=0.3)\n protocol.delay(seconds=2)\n right_pipette.touch_tip()\n right_pipette.air_gap(2)\n\n right_pipette.dispense(volume=DNA_volume,\n location=cryo_vial_tubes_2[positions[i]].bottom(2),\n rate=0.3)\n right_pipette.blow_out(location=cryo_vial_tubes_2[positions[i]].bottom(9))\n protocol.delay(seconds=1)\n\n if water_run:\n right_pipette.return_tip(home_after=False)\n else:\n right_pipette.drop_tip(home_after=False)\n i = i + 1\n\n\n def flashing_lights():\n for i in range(5):\n protocol.set_rail_lights(True)\n protocol.delay(seconds=0.5)\n protocol.set_rail_lights(False)\n protocol.delay(seconds=0.5)\n\n ### COMMANDS ####\n protocol.set_rail_lights(True)\n dna_aliquot()\n flashing_lights()\n protocol.set_rail_lights(True)\n", "sub_path": "OT2CEP20210331B09_DNA_EXT/Protocols/DNA_ALIQUOTING/DNA_ALIQUOTING_V1.0.py", "file_name": "DNA_ALIQUOTING_V1.0.py", "file_ext": "py", "file_size_in_byte": 3332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "opentrons.protocol_api.ProtocolContext", "line_number": 23, "usage_type": "attribute"}, {"api_name": "opentrons.protocol_api", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "207903835", "text": "import prepare_sigmorphon_data\nimport common\nimport codecs\n\nBEGIN_WORD = ''\nEND_WORD = ''\nNULL = '%'\n\ndef main():\n #train_path = '/Users/roeeaharoni/research_data/sigmorphon2016-master/data/german-task1-train'\n #test_path = '/Users/roeeaharoni/research_data/sigmorphon2016-master/data/german-task1-dev'\n\n train_path = '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_train.txt.sigmorphon_format'\n test_path = '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_test.txt.sigmorphon_format'\n dev_path = '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_dev.txt.sigmorphon_format'\n\n convert_sigmorphon_to_morphtrans(train_path, '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_train.txt.morphtrans_format.txt')\n convert_sigmorphon_to_morphtrans(test_path, 
'/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_test.txt.morphtrans_format.txt', False)\n convert_sigmorphon_to_morphtrans(dev_path, '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_dev.txt.morphtrans_format.txt', False)\n\ndef convert_sigmorphon_to_morphtrans(sig_file, morphtrans_file, create_alphabet = True):\n\n (words, lemmas, feat_dicts) = prepare_sigmorphon_data.load_data(sig_file)\n alphabet, feats = prepare_sigmorphon_data.get_alphabet(words, lemmas, feat_dicts)\n alphabet.append(BEGIN_WORD)\n alphabet.append(END_WORD)\n\n if create_alphabet:\n with codecs.open(morphtrans_file + '.word_alphabet', \"w\", encoding='utf8') as alphabet_file:\n alphabet_file.write(' '.join([c for c in list(alphabet) if len(c) < 2]) + ' ' + END_WORD + ' '\n + BEGIN_WORD)\n\n morph2feats = common.cluster_data_by_morph_type(feat_dicts, feats)\n with codecs.open(morphtrans_file + '.morph_alphabet', \"w\", encoding='utf8') as alphabet_file:\n alphabet_file.write(' '.join([key for key in morph2feats.keys()]))\n\n with codecs.open(morphtrans_file, \"w\", encoding='utf8') as output_file:\n for lemma, word, dict in zip(lemmas, words, feat_dicts):\n # a b g a s k l a p p e | a b g a s k l a p p e |case=nominative:number=singular\n output_file.write(BEGIN_WORD + ' ' + ' '.join(list(lemma)) + ' ' + END_WORD + '|' + BEGIN_WORD + ' ' +\n ' '.join(list(word)) + ' ' + END_WORD + '|' + get_morph_string(dict, feats) + '\\n')\n return\n\ndef get_morph_string(feat_dict, feats):\n s = ''\n for f in sorted(feats):\n if f in feat_dict:\n s += f + '=' + feat_dict[f] + ':'\n else:\n s += f + '=' + NULL + ':'\n return s[:-1]\n\nif __name__ == '__main__':\n main()", "sub_path": "src/sigmorphon2morphtrans.py", "file_name": "sigmorphon2morphtrans.py", "file_ext": "py", "file_size_in_byte": 2851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "prepare_sigmorphon_data.load_data", "line_number": 23, "usage_type": "call"}, {"api_name": "prepare_sigmorphon_data.get_alphabet", "line_number": 24, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 29, "usage_type": "call"}, {"api_name": "common.cluster_data_by_morph_type", "line_number": 33, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 34, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "60976605", "text": "# from keras.datasets import mnist\n#\n# (train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n#\n# print(train_images.ndim)\n# print(train_images.shape)\n# print(train_images.dtype)\n#\n# digit = train_images[4]\n#\n# import matplotlib.pyplot as plt\n#\n# my_slice = train_images[10:100]\n# print(my_slice.shape)\n#\n# # my_slice2 = train_images[:, 14:, 14:]\n# # print(my_slice2.shape)\n#\n# def naive_relu(x):\n# assert len(x.shape) == 2\n# x = x.copy() #입력텐서를 바꾸지 않도록 복사\n# for i in range(x.shape[0]):\n# for j in range(x.shape[1]):\n# x[i, j] = max(x[i, j], 0)\n# return x\n#\n# def naive_add(x, y):\n# assert len(x.shape) == 2\n# assert x.shape == y.shape\n#\n# x = x.copy()\n# for i in range(x.shape[0]):\n# for j in range(x.shape[1]):\n# x[i, j] += y[i, j]\n# return x\n\nimport numpy as np\nimport os\nimport time\n\nos.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'\nimport keras\nimport keras.applications as kapp\nfrom keras.datasets import cifar10\n\n(x_train, y_train_cats), (x_test, y_test_cats) = 
cifar10.load_data()\nbatch_size = 8\nx_train = x_train[:batch_size]\nx_train = np.repeat(np.repeat(x_train, 7, axis=1), 7, axis=2)\nprint('1')\nmodel = kapp.VGG19()\nmodel.compile(optimizer='sgd', loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nprint(\"Running initial batch (compiling tile program)\")\ny = model.predict(x=x_train, batch_size=batch_size)\n\n# Now start the clock and run 10 batches\nprint(\"Timing inference...\")\nstart = time.time()\nfor i in range(10):\n y = model.predict(x=x_train, batch_size=batch_size)\n print(i)\nprint(\"Ran in {} seconds\".format(time.time() - start))", "sub_path": "DeepLearning/mnist.py", "file_name": "mnist.py", "file_ext": "py", "file_size_in_byte": 1706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "keras.datasets.cifar10.load_data", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.repeat", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.applications.VGG19", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 51, "usage_type": "name"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "646745512", "text": "'''\n2020-9-26 caoxinzi\n'''\n\n'''------------------图像可视化(查看不同step的图像)------------------'''\n\nfrom torchvision import datasets\nimport torchvision.transforms as transform\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport torch\n\nwriter = SummaryWriter(log_dir='./log')\nnum_workers = 0\n\n#每批加载16张图片\nbatch_size =16\n\n#percentage of training set to use as validation\nvalid_size = 0.2\n\n#将数据转换为torch.FloatTensor,并标准化\ntransform = transform.Compose([\n transform.ToTensor(),\n transform.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))\n])\n\n#选择训练数据集和测试数据集\ntrain_data = datasets.CIFAR10('data', train=True, download=True, transform=transform)\ntest_data = datasets.CIFAR10('data', train=False, download=True, transform=transform)\n\n#将训练数据集中划分验证集\nnum_train = len(train_data)\nindices = list(range(num_train))\nnp.random.shuffle(indices)\nsplit = int(np.floor(valid_size * num_train))\ntrain_idx, valid_idx = indices[split:],indices[:split]\n\n#define samples for obtaining training and validation batches\ntrain_sampler = SubsetRandomSampler(train_idx)\nvalid_sampler = SubsetRandomSampler(valid_idx)\n\n#prepare data loaders(combine dataset and sampler)\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers)\n\nvalid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers)\n\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)\n\n#对训练数据进行可视化\nimgs, labels = iter(train_data).__next__()\n\nfor i in range(len(imgs)):\n # 乘以偏差\n img = imgs[i].mul(torch.Tensor(np.array([0.5, 0.5, 0.5]).reshape(-1,1,1)))\n \n #加上均值\n img = imgs[i].add(torch.Tensor(np.array([0.5, 0.5, 0.5]).reshape(-1,1,1)))\n \n #加入图像数据\n writer.add_image('input',img,i+1)\n\n", "sub_path": "8_1 Pytorch Visualization.py", "file_name": "8_1 Pytorch Visualization.py", "file_ext": "py", "file_size_in_byte": 2043, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.random.shuffle", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.SubsetRandomSampler", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.SubsetRandomSampler", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "63529545", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n AipBase\n\"\"\"\nimport hmac\nimport json\nimport hashlib\nimport http\nimport datetime\nimport base64\nimport time\nfrom urllib import urlencode\nfrom urllib import quote\nfrom urlparse import urlparse\n\nclass AipBase:\n \"\"\"\n AipBase\n \"\"\"\n\n __accessTokenUrl = 'https://aip.baidubce.com/oauth/2.0/token'\n\n __scopes = set([\n 'vis-ocr_ocr',\n 'vis-ocr_bankcard',\n 'vis-faceattribute_faceattribute',\n 'nlp_wordseg',\n 'nlp_simnet',\n 'nlp_wordemb',\n 'nlp_comtag',\n 'nlp_wordpos',\n 'nlp_dnnlm_cn',\n 'vis-antiporn_antiporn_v2',\n 'audio_voice_assistant_get',\n 'audio_tts_post',\n 'vis-faceverify_faceverify',\n ])\n\n def __init__(self, appId, apiKey, secretKey):\n \"\"\"\n AipBase(appId, apiKey, secretKey)\n \"\"\"\n\n self._appId = appId.strip()\n self._apiKey = apiKey.strip()\n self._secretKey = secretKey.strip()\n self._authObj = {}\n self._isCloudUser = None\n\n def _request(self, url, data):\n \"\"\"\n self._request('', {})\n \"\"\"\n\n authObj = self._auth()\n headers = self._getAuthHeaders('POST', url)\n params = self._getParams(authObj)\n\n response = 
http.post(url, data=data, params=params, headers=headers)\n obj = self._proccessResult(response)\n\n if not self._isCloudUser and obj.get('error_code', '') == 110:\n authObj = self._auth(True)\n params = self._getParams(authObj)\n response = http.post(url, data=data, params=params, headers=headers)\n obj = self._proccessResult(response)\n\n return obj\n\n def _proccessResult(self, content):\n \"\"\"\n formate result\n \"\"\"\n\n return json.loads(content) or {}\n\n def _auth(self, refresh=False):\n \"\"\"\n api access auth\n \"\"\"\n \n if len(self._apiKey) == 32 or self._isCloudUser == True:\n self._isCloudUser = True\n return\n\n #未过期\n if not refresh:\n tm = self._authObj.get('time', 0) + int(self._authObj.get('expire_in', 0)) - 30\n if tm > int(time.time()):\n return self._authObj\n\n obj = json.loads(http.get(self.__accessTokenUrl, params={\n 'grant_type': 'client_credentials',\n 'client_id': self._apiKey,\n 'client_secret': self._secretKey,\n }))\n\n self._isCloudUser = not self._isPermission(obj)\n\n return obj\n\n def _isPermission(self, authObj):\n \"\"\"\n check whether permission\n \"\"\"\n\n scopes = authObj.get('scope', False) \n if scopes == False:\n return False\n\n intersection = self.__scopes.intersection(set(scopes.split(' ')))\n\n return not not intersection\n\n def _getParams(self, authObj):\n \"\"\"\n api request http url params\n \"\"\"\n\n params = {}\n\n if self._isCloudUser == False:\n params['access_token'] = authObj['access_token']\n\n return params\n\n def _getAuthHeaders(self, method, url):\n \"\"\"\n api request http headers\n \"\"\"\n if self._isCloudUser == False:\n return {}\n\n # UTC timestamp\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n urlResult = urlparse(url)\n host = urlResult.hostname\n path = urlResult.path\n version, expire, signatureHeaders = '1', '1800', 'host'\n\n # 1 Generate SigningKey\n val = \"bce-auth-v%s/%s/%s/%s\" % (version, self._apiKey, timestamp, expire)\n signingKey = hmac.new(self._secretKey, val, hashlib.sha256).hexdigest().encode('utf-8')\n\n # 2 Generate CanonicalRequest\n # 2.1 Genrate CanonicalURI\n canonicalUri = quote(path)\n # 2.2 Generate CanonicalURI: not used here\n # 2.3 Generate CanonicalHeaders: only include host here\n canonicalHeaders = 'host:%s' % quote(host).strip()\n # 2.4 Generate CanonicalRequest\n canonicalRequest = '%s\\n%s\\n\\n%s' % (method.upper(), canonicalUri, canonicalHeaders)\n\n # 3 Generate Final Signature \n signature = hmac.new(signingKey, canonicalRequest, hashlib.sha256).hexdigest()\n authorization = 'bce-auth-v%s/%s/%s/%s/%s/%s' % (version, self._apiKey, timestamp, expire, signatureHeaders, signature)\n\n return {\n 'Host': host,\n 'x-bce-date': timestamp,\n 'accept': '*/*',\n 'authorization': authorization,\n }\n", "sub_path": "venv/Lib/site-packages/aip/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 4571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "http.post", "line_number": 60, "usage_type": "call"}, {"api_name": "http.post", "line_number": 66, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 93, "usage_type": "call"}, {"api_name": "http.get", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 136, "usage_type": "attribute"}, {"api_name": "urlparse.urlparse", "line_number": 137, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 144, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 144, "usage_type": "attribute"}, {"api_name": "urllib.quote", "line_number": 148, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 151, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 156, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "434992406", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom sphinx.util.compat import Directive\n\nimport io\nimport os\nimport sys\nimport re\n\nclass question(nodes.General, nodes.Element): pass\n\ndef visit_question_node(self, node):\n self.body.append(\"
\")\n self.body.append(\"
\"+node[\"ques\"]+\"
\")\n if node[\"a\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"a\"]+\"
\")\n if node[\"b\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"b\"]+\"
\")\n if node[\"c\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"c\"]+\"
\")\n if node[\"d\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"d\"]+\"
\")\n if node[\"e\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"e\"]+\"
\")\n if node[\"f\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"f\"]+\"
\")\n self.body.append(\"
\")\n self.body.append(\"
\")\n self.body.append(\"
\")\n\n self.body.append(\"\")\n\n\ndef depart_question_node(self, node):\n pass\n\nclass Question(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 7\n final_argument_whitespace = False\n option_spec = {\n \"ra\": directives.unchanged,\n \"a\": directives.unchanged,\n \"b\": directives.unchanged,\n \"c\": directives.unchanged,\n \"d\": directives.unchanged,\n \"e\": directives.unchanged,\n \"f\": directives.unchanged,\n }\n\n def run(self):\n id = \"\".join(self.arguments)\n id = id.replace(\"'\", \"\")\n id = id.replace(\"?\", \"\")\n ques = \" \".join(self.arguments)\n\n ra = None\n a = None\n b = None\n c = None\n d = None\n e = None\n f = None\n\n if \"ra\" in self.options:\n ra = self.options[\"ra\"]\n if \"a\" in self.options:\n a = self.options[\"a\"]\n if \"b\" in self.options:\n b = self.options[\"b\"]\n if \"c\" in self.options:\n c = self.options[\"c\"]\n if \"d\" in self.options:\n d = self.options[\"d\"]\n if \"e\" in self.options:\n e = self.options[\"e\"]\n if \"f\" in self.options:\n f = self.options[\"f\"]\n\n return [question(id=id, ques=ques, ra=ra, a=a, b=b, c=c, d=d, e=e, f=f)]\n\n\ndef setup(app):\n app.add_node(question, html=(visit_question_node, depart_question_node))\n app.add_directive(\"question\", Question)", "sub_path": "back_annotation_loop/elec1601_sphinx_v4/source/exts/question.py", "file_name": "question.py", "file_ext": "py", "file_size_in_byte": 4496, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "docutils.nodes.General", "line_number": 14, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 14, "usage_type": "name"}, {"api_name": "docutils.nodes.Element", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sphinx.util.compat.Directive", "line_number": 62, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 68, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 68, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 69, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 69, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 70, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 70, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 71, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 71, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 72, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 72, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 73, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 73, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 74, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "42662080", "text": "# from pyzbar import pyzbar\r\n# import argparse\r\n# import cv2\r\n# # construct the argument parser and parse the arguments\r\n# ap = argparse.ArgumentParser()\r\n# ap.add_argument(\"-i\", \"--image\", required=True,\r\n# help=\"path to input image\")\r\n# args = 
vars(ap.parse_args())\r\n\r\n# # load the input image\r\n# image = cv2.imread(args[\"C:\\\\Users\\\\jfern\\\\OneDrive\\\\Desktop\\\\12345.png\"])\r\n\r\n# # find the barcodes in the image and decode each of the barcodes\r\n# barcodes = pyzbar.decode(image)\r\n\r\n# for barcode in barcodes:\r\n# \t# extract the bounding box location of the barcode and draw the\r\n# \t# bounding box surrounding the barcode on the image\r\n# \t(x, y, w, h) = barcode.rect\r\n# \tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n\r\n# \t# the barcode data is a bytes object so if we want to draw it on\r\n# \t# our output image we need to convert it to a string first\r\n# \tbarcodeData = barcode.data.decode(\"utf-8\")\r\n# \tbarcodeType = barcode.type\r\n\r\n# \t# draw the barcode data and barcode type on the image\r\n# \ttext = \"{} ({})\".format(barcodeData, barcodeType)\r\n# \tcv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,\r\n# \t\t0.5, (0, 0, 255), 2)\r\n\r\n# \t# print the barcode type and data to the terminal\r\n# \tprint(\"[INFO] Found {} barcode: {}\".format(barcodeType, barcodeData))\r\n\r\n# # show the output image\r\n# cv2.imshow(\"Image\", image)\r\n# cv2.waitKey(0)\r\n\r\n\r\nimport hashlib\r\nfrom imutils.video import VideoStream\r\nfrom pyzbar import pyzbar\r\nimport argparse\r\nimport datetime\r\nimport imutils\r\nimport time\r\nimport cv2\r\n\r\na = 0\r\n\r\n# construct the argument parser and parse the arguments\r\n# ap = argparse.ArgumentParser()\r\n# ap.add_argument(\"-o\", \"--output\", type=str, default=\"\",\r\n# help=\"path to output CSV file containing barcodes\")\r\n# args = vars(ap.parse_args())\r\n\r\nprint(\"[INFO] starting video stream...\")\r\n# vs = VideoStream(src=0).start()\r\nvs = VideoStream(0).start()\r\ntime.sleep(1.0)\r\n\r\n# open the output CSV file for writing and initialize the set of\r\n# barcodes found thus far\r\n# csv = open(args[\"output\"], \"w\")\r\nfound = set()\r\nwhile True:\r\n # grab the frame from the threaded video stream and resize it to\r\n # have a maximum width of 400 pixels\r\n frame = vs.read()\r\n frame = imutils.resize(frame, width=400)\r\n\r\n # find the barcodes in the frame and decode each of the barcodes\r\n barcodes = pyzbar.decode(frame)\r\n # loop over the detected barcodes\r\n for barcode in barcodes:\r\n # extract the bounding box location of the barcode and draw\r\n # the bounding box surrounding the barcode on the image\r\n (x, y, w, h) = barcode.rect\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n\r\n # the barcode data is a bytes object so if we want to draw it\r\n # on our output image we need to convert it to a string first\r\n barcodeData = barcode.data.decode(\"utf-8\")\r\n barcodeType = barcode.type\r\n\r\n # draw the barcode data and barcode type on the image\r\n text = \"{}\".format(barcodeData)\r\n cv2.putText(frame, text, (x, y - 10),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n\r\n a = 1\r\n\r\n # if the barcode text is currently not in our CSV file, write\r\n # the timestamp + barcode to disk and update the set\r\n # if barcodeData not in found:\r\n # csv.write(\"{},{}\\n\".format(datetime.datetime.now(),\r\n # barcodeData))\r\n # csv.flush()\r\n # found.add(barcodeData)\r\n\r\n # show the output frame\r\n cv2.imshow(\"Barcode Scanner\", frame)\r\n key = cv2.waitKey(1) & 0xFF\r\n\r\n # if the `q` key was pressed, break from the loop\r\n if key == ord(\"q\") or a == 1:\r\n break\r\n\r\n# close the output CSV file do a bit of cleanup\r\nprint(\"[INFO] cleaning 
up...\")\r\nprint(text)\r\n\r\n# initializing string\r\nstr = text\r\n\r\n\r\n# encoding GeeksforGeeks using encode()\r\n# then sending to SHA256()\r\nresult = hashlib.sha256(str.encode())\r\n\r\n# printing the equivalent hexadecimal value.\r\nprint(\"The hexadecimal equivalent of SHA256 is : \")\r\nprint(result.hexdigest())\r\n\r\nprint(\"\\r\")\r\n# csv.close()\r\ncv2.destroyAllWindows()\r\nvs.stop()\r\n\r\n\r\n", "sub_path": "DMCE final codebase/blockchain_client/scanner.py", "file_name": "scanner.py", "file_ext": "py", "file_size_in_byte": 4163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "imutils.video.VideoStream", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "imutils.resize", "line_number": 70, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar.decode", "line_number": 73, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar", "line_number": 73, "usage_type": "name"}, {"api_name": "cv2.rectangle", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 103, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "444358816", "text": "\"\"\"\nDjango settings for agencia24 project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nPROJECT_ROOT = BASE_DIR\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'i1)hxlju1t0i0-7kq&!&usy*2^xvx2fn4d!oa(vbdfbf1f3hs8'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\t# 3rd party apps I added:\n 'axes',\n 'bootstrap3_datetime',\n 'corsheaders',\n 'django_extensions',\n 'django_modalview',\n #'debug_toolbar',\n 'django_unused_media',\n 'django_user_agents',\n 'fixture_magic',\n 'longerusernameandemail',\n 'mathfilters',\n 'oauth2_provider', # add 'WSGIPassAuthorization On' to httpd.conf file\n\t'pagination',\n 'passwords',\n\t'registration',\n 'widget_tweaks',\n\t# My apps\n 'bet',\n 'simple_webservice',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'pagination.middleware.PaginationMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'axes.middleware.FailedLoginMiddleware',\n 'oauth2_provider.middleware.OAuth2TokenMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n)\n\nROOT_URLCONF = 'agencia24.urls'\n\nWSGI_APPLICATION = 'agencia24.wsgi.application'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_ROOT, \"templates\"),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'bet.context_processors.debug',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n ],\n #'debug': False,\n },\n },\n]\nfrom django.template.loaders import eggs\n\nif not DEBUG:\n TEMPLATES[0]['OPTIONS']['loaders'] = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n )),\n)\n\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-ar'\n\nTIME_ZONE = 'America/Argentina/Cordoba'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\nMEDIA_URL = \"/site_media/media/\"\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, \"site_media\", \"_media\")\n\nSTATIC_URL = '/site_media/static/'\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, \"static\")\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT, \"site_media\", \"static\"),\n)\n\nLOCALE_PATHS = (\n os.path.join(PROJECT_ROOT, 'locale').replace('\\\\', '/'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': 'A24 %(levelname)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console':{\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'agencia24_default': {\n 'handlers': ['console', 'mail_admins'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n\n#===============================================================================\n# 
REGISTRATION!\n#===============================================================================\n\nACCOUNT_ACTIVATION_DAYS = 5\nSEND_ACTIVATION_EMAIL = False\n\nEMAIL_HOST_USER = \"no_reply_agencia24\"\nEMAIL_HOST_PASSWORD = \"noresponder\"\nEMAIL_HOST = 'smtp.webfaction.com'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = '(Agencia24) '\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\n\nREQUIRE_UNIQUE_EMAIL = False\n\n#===============================================================================\n\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/accounts/login'\n\n\n#===============================================================================\n# MERCADOPAGO\n#===============================================================================\n\nMP_ACCESS_TOKEN = \"TEST-2692549476916264-012507-3c26394260b30dfc3c78a004094cf36d__LA_LB__-162591608\"\n\nMP_CLIENT_ID = \"2692549476916264\"\nMP_SECRET_KEY = \"oB4gYLQz5lNhFRWOuXt0WNW4umSW2mvj\"\n\n#===============================================================================\n# PDFKIT\n#===============================================================================\n\nWKHTMLTOPDF_PATH = ''\n\n#===============================================================================\n# PUSH\n#===============================================================================\n\n# IOS\nIOS_PUSH_HEADERS = {\n \"Authorization\": \"key=AIzaSyAuMBsR2J-i1Ne9gHH_1DL8jbHEBYJ5IgU\",\n \"content-Type\": \"application/json\"\n}\n\nANDROID_PUSH_HEADERS = {\n \"Authorization\": \"key=AIzaSyD-dcMsjsQsWbJ1tPwjsnMdwym79mE8xDU\",\n #\"Authorization\": \"key=AIzaSyA-D9yqibGabnUb_5bqQZptdQFxBQndGuc\",\n \"content-Type\": \"application/json\"\n}\n\n#===============================================================================\n# DJANGO OAUTH TOOLKIT\n#===============================================================================\n\nOAUTH2_PROVIDER = {\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 600, # Seconds\n 'REFRESH_TOKEN_EXPIRE_SECONDS': 6*3600,\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nAUTHENTICATION_BACKENDS = (\n 'oauth2_provider.backends.OAuth2Backend',\n # Uncomment following if you want to access the admin\n 'django.contrib.auth.backends.ModelBackend'\n)\n\n#===============================================================================\n# DJANGO-PASSWORDS!\n#===============================================================================\n\nPASSWORD_MIN_LENGTH = 4\n\nPASSWORD_COMPLEXITY = { # You can omit any or all of these for no limit for that particular set\n \"UPPER\": 0, # Uppercase\n \"LOWER\": 0, # Lowercase\n \"LETTERS\": 0, # Either uppercase or lowercase letters\n \"DIGITS\": 0, # Digits\n \"SPECIAL\": 0, # Not alphanumeric, space or punctuation character\n \"WORDS\": 0 # Words (alphanumeric sequences separated by a whitespace or punctuation character)\n}\n\n#===============================================================================\n#===============================================================================\n\nQUINI6_MAX_NUMBER = 45\nLOTO_MAX_NUMBER = 41\nLOTO_MAX_EXTRA = 9\nLOTO5_MAX_NUMBER = 36\nBRINCO_MAX_NUMBER = 39\n\n#===============================================================================\n# DJANGO USER AGENT\n#===============================================================================\n\n# TODO!\n# Cache backend is optional, but recommended to speed up user agent parsing\n#CACHES = {\n# 'default': {\n# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n# 'LOCATION': '127.0.0.1:11211',\n# }\n#}\n\n# Name of cache backend to 
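The django-user-agents section here (the comment continues just below) leaves the optional cache disabled. A minimal sketch of that wiring, using LocMemCache so the example is self-contained; the backend choice is an assumption, since the memcached variant in the record needs a running server.

```python
# Assumed settings fragment: any configured Django cache alias works;
# USER_AGENTS_CACHE names the alias django-user-agents should use.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
    }
}
USER_AGENTS_CACHE = "default"
```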
cache user agents. If it not specified default\n# cache alias will be used. Set to `None` to disable caching.\n#USER_AGENTS_CACHE = 'default'\n\n#===============================================================================\n# DJANGO AXES\n#===============================================================================\n\nfrom django.utils.timezone import timedelta\nAXES_COOLOFF_TIME = timedelta(minutes=20) # Hours\nAXES_LOCKOUT_TEMPLATE = 'registration/login.html'\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True\n\n\"\"\"\nAXES_LOGIN_FAILURE_LIMIT: The number of login attempts allowed before a record is created for the failed logins. Default: 3\nAXES_LOCK_OUT_AT_FAILURE: After the number of allowed login attempts are exceeded, should we lock out this IP (and optional user agent)? Default: True\nAXES_USE_USER_AGENT: If True, lock out / log based on an IP address AND a user agent. This means requests from different user agents but from the same IP are treated differently. Default: False\nAXES_COOLOFF_TIME: If set, defines a period of inactivity after which old failed login attempts will be forgotten. Can be set to a python timedelta object or an integer. If an integer, will be interpreted as a number of hours. Default: None\nAXES_LOGGER: If set, specifies a logging mechanism for axes to use. Default: 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE: If set, specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as context variables. Default: None\nAXES_LOCKOUT_URL: If set, specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template will be used. Default: None\nAXES_VERBOSE: If True, you'll see slightly more logging for Axes. Default: True\nAXES_USERNAME_FORM_FIELD: the name of the form field that contains your users usernames. Default: username\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP: If True prevents to login from IP under particular user if attempts limit exceed, otherwise lock out based on IP. 
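Per the axes documentation quoted in this block (which continues just below), an integer AXES_COOLOFF_TIME is interpreted as a number of hours while a timedelta is used as-is; the inline "# Hours" comment next to `timedelta(minutes=20)` above is therefore misleading. A quick illustration of the difference:

```python
from datetime import timedelta

twenty_minutes = timedelta(minutes=20)  # what the record actually configures
twenty_hours = timedelta(hours=20)      # what AXES_COOLOFF_TIME = 20 would mean
print(twenty_minutes, twenty_hours)     # 0:20:00 vs 20:00:00
```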
Default: False\n\"\"\"\n\n#===============================================================================\n\nADMINS = [('Developer', 'developer@liricus.com.ar')]\n#DEBUG = False\n#ALLOWED_HOSTS = ['*']\n\nSUPPORTED_IMPORT_EXT = ('.csv',)\nEXTRACT_SEPARATOR = '*'\n\nfrom local_settings_sf import *\n", "sub_path": "agencia24/settings_sf.py", "file_name": "settings_sf.py", "file_ext": "py", "file_size_in_byte": 11803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 326, "usage_type": "call"}]} +{"seq_id": "452394228", "text": "# -*- coding: utf-8 -*-\nfrom .file_utils import fetch_or_upload_file_links, fetch_or_upload_html_links\nimport time as _time\nfrom installed_clients.baseclient import ServerError as _DFUError\nfrom uuid import uuid4\n\n\"\"\" Utilities for creating reports using DataFileUtil \"\"\"\n\n\ndef create_report(params, dfu):\n \"\"\"\n Create a simple report\n :param params: see the KIDL spec for the create() parameters\n :param dfu: instance of DataFileUtil\n :return: report data\n \"\"\"\n report_name = \"report_\" + str(uuid4())\n workspace_id = _get_workspace_id(dfu, params)\n # Empty defaults for merging\n report_data = {\n 'objects_created': [],\n 'text_message': '',\n }\n report_data.update(params['report'])\n save_object_params = {\n 'id': workspace_id,\n 'objects': [{\n 'type': 'KBaseReport.Report',\n 'data': report_data,\n 'name': report_name,\n 'meta': {},\n 'hidden': 1\n }]\n }\n obj = _save_object(dfu, save_object_params)\n ref = _get_object_ref(obj)\n return {'ref': ref, 'name': report_name}\n\n\ndef create_extended(params, dfu, templater):\n \"\"\"\n Create an extended report\n This will upload files to shock if you provide scratch paths instead of shock_ids\n :param params: see the KIDL spec for create_extended_report() parameters\n :param dfu: instance of DataFileUtil\n :return: uploaded report data - {'ref': r, 'name': n}\n \"\"\"\n file_links = params.get('file_links', [])\n html_links = params.get('html_links', [])\n files = fetch_or_upload_file_links(dfu, file_links, templater) # see ./file_utils.py\n html_files = fetch_or_upload_html_links(dfu, html_links, templater)\n report_data = {\n 'text_message': params.get('message'),\n 'file_links': files,\n 'html_links': html_files,\n 'warnings': params.get('warnings', []),\n 'direct_html': params.get('direct_html'),\n 'direct_html_link_index': params.get('direct_html_link_index'),\n 
'objects_created': params.get('objects_created', []),\n 'html_window_height': params.get('html_window_height'),\n 'summary_window_height': params.get('summary_window_height')\n }\n report_name = params.get('report_object_name', 'report_' + str(uuid4()))\n workspace_id = _get_workspace_id(dfu, params)\n save_object_params = {\n 'id': workspace_id,\n 'objects': [{\n 'type': 'KBaseReport.Report',\n 'data': report_data,\n 'name': report_name,\n 'meta': {},\n 'hidden': 1\n }]\n }\n obj = _save_object(dfu, save_object_params)\n ref = _get_object_ref(obj)\n return {'ref': ref, 'name': report_name}\n\n\ndef _get_workspace_id(dfu, params):\n \"\"\"\n Get the workspace ID from the params, which may either have 'workspace_id'\n or 'workspace_name'. Workspace ID is immutable so should take precedence.\n \"\"\"\n if 'workspace_id' in params:\n return params.get('workspace_id')\n\n return dfu.ws_name_to_id(params['workspace_name'])\n\n\ndef _get_object_ref(obj):\n \"\"\" Get the reference string from an uploaded dfu object \"\"\"\n return str(obj[6]) + '/' + str(obj[0]) + '/' + str(obj[4])\n\n\ndef _save_object(dfu, params):\n \"\"\" Save an object with DFU using error handling \"\"\"\n try:\n return dfu.save_objects(params)[0]\n except _DFUError as err:\n print(f'{_time.time()} DataFileUtil exception: {err}')\n raise err\n except Exception as err:\n print(f'{_time.time()} Unexpected DataFileUtil exception: {err}')\n raise err\n", "sub_path": "lib/KBaseReport/utils/report_utils.py", "file_name": "report_utils.py", "file_ext": "py", "file_size_in_byte": 3588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "call"}, {"api_name": "file_utils.fetch_or_upload_file_links", "line_number": 50, "usage_type": "call"}, {"api_name": "file_utils.fetch_or_upload_html_links", "line_number": 51, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 63, "usage_type": "call"}, {"api_name": "installed_clients.baseclient.ServerError", "line_number": 100, "usage_type": "name"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "308672346", "text": "import time\nimport math\nimport numpy as np\nimport scipy\nfrom scipy import optimize\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport csv\nimport pandas as pd\nfrom definitions import *\nimport callFile\nfrom scipy import stats as stats\n\ndef main(N,smallestIncrement):\n\n\n # attainable_mass_simulate and weighted_stddev are taken directly from grind size application source code, adapted only\n # to work with this code\n #Method to calculate attainable mass\n def attainable_mass_simulate(volumes):\n \n #This could be done better analytically\n depth_limit = 0.1 #mm\n \n radii = (3.0/4.0*volumes/np.pi)**(1/3)\n unreachable_volumes = np.full(volumes.size, 0.0)\n \n iboulders = np.where(radii > depth_limit)\n unreachable_volumes[iboulders[0]] = 4.0/3.0*np.pi*(radii[iboulders[0]] - depth_limit)**3\n reachable_volumes = volumes - unreachable_volumes\n \n return reachable_volumes\n\n def weighted_stddev(data, weights, frequency=True, unbiased=True):\n \n #Calculate the bias correction estimator\n if unbiased is True:\n if frequency is True:\n bias_estimator = (np.nansum(weights) - 1.0)/np.nansum(weights)\n else:\n bias_estimator = 1.0 - (np.nansum(weights**2))/(np.nansum(weights)**2)\n else:\n bias_estimator = 1.0\n \n 
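In the report-utils record above, `_get_object_ref` assembles a `workspace/object/version` reference from the info tuple `DataFileUtil.save_objects` returns. A standalone illustration; the info tuple here is hypothetical, only the index positions (6, 0, 4) are taken from the record.

```python
def object_ref(obj_info):
    # indices mirror _get_object_ref: 6 = workspace id, 0 = object id, 4 = version
    return "{}/{}/{}".format(obj_info[6], obj_info[0], obj_info[4])

# hypothetical object-info tuple shaped like the workspace service output
info = (12, "report_x", "KBaseReport.Report-3.0", "timestamp", 1, "owner", 4567)
print(object_ref(info))  # -> 4567/12/1
```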
#Normalize weights\n weights /= np.nansum(weights)\n \n #Calculate weighted average\n wmean = np.nansum(data*weights)\n \n #Deviations from average\n deviations = data - wmean\n \n #Un-biased weighted variance\n wvar = np.nansum(deviations**2*weights)/bias_estimator\n \n #Un-biased weighted standard deviation\n wstddev = np.sqrt(wvar)\n \n return wstddev\n\n grindSetting=np.arange(1,N+1)\n ##### Extract Data from .csv files #####\n # Loop through each grind setting\n for a in range(1,N+1): # need to start at 1 because filenames are 1-N\n # import each file, list the values, then change the lists into arrays\n statsArray = np.array(pd.read_csv(\"data/setting%d_stats.csv\" % a))\n# statsArray = np.array(statsList)\n\n \n # get the value from each stats column in the order: '', avg_diam, std_diam, avg_surface, std_surface, efficiency, quality\n avgDiam.append(float(statsArray[:,1]))\n #stdDiam.append(float(statsArray[:,2]))\n avgSurf.append(float(statsArray[:,3])); stdSurf.append(float(statsArray[:,4]))\n efficiency.append(float(statsArray[:,5])); quality.append(float(statsArray[:,6]))\n\n # get the value from each .csv column in the order: ID, surface, roundness, short_axis, long_axis, volume, pixel_scale\n # data for grind setting 'a' can be retrieved as parameter[a]\n settingArray = np.array(pd.read_csv(\"data/setting%d.csv\" % a))\n# settingArray = np.array(settingList)\n \n\n pixel_scale = settingArray[:,6]\n surfaces = settingArray[:,1]/pixel_scale**2\n volumes = settingArray[:,5]/pixel_scale**3\n attainable_masses = attainable_mass_simulate(volumes)\n data_weights = surfaces\n weights = np.maximum(np.ceil(attainable_masses/(coffee_cell_size/1e3)**3),1)\n surfacesAverage = np.sum(surfaces*weights)/np.sum(weights)\n stdDiamUpper.append(np.max(surfaces)-surfacesAverage)\n stdDiamLower.append(surfacesAverage-np.min(surfaces))\n surfacesStats = stats.describe(surfaces)\n skewness.append(surfacesStats[4])\n kurtosis.append(surfacesStats[5])\n\n # Calculate the average adjustment made between each whole-number grind setting and print results\n for b in range(0,N-1):\n settingAdjustment.append(avgSurf[b+1] - avgSurf[b])\n avgAdjustment = np.sum(settingAdjustment)/len(settingAdjustment)\n\n print()\n print(\"-----------------------------------------------\")\n print(\"---------Grinder Adjustment Parameters --------\")\n print()\n print(\"Total Adjustment Range (Setting {}-1): {:.2}mm^2\".format(N,avgSurf[-1]-avgSurf[0]))\n print(\"Average Adjustment Between Each Setting: {:.2}mm^2\".format(avgAdjustment))\n print()\n\n ##### Information To Plot ##### \n print()\n whichInformation = input(\"Which information would you like to view? (d)iameter,(s)urface: \")\n dataTypes=[avgDiam, stdDiam, avgSurf, stdSurf]\n def dataType(type):\n global data, dataError, pltTitle, units\n if type == \"d\":\n data = dataTypes[0]\n dataError = dataTypes[1]\n pltTitle = \"Average Diameter\"\n units = \"mm\"\n elif type == \"s\":\n data = dataTypes[2]\n dataError = dataTypes[3]\n pltTitle = \"Average Surface Area\"\n units = \"mm^2\"\n return data, dataError\n dataType(whichInformation)\n\n\n ##### Fitting #####\n print()\n # Ask user which regression form to use\n #fitType = input(\"Which Fit Type Would You Like? 
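A compact NumPy restatement of the `weighted_stddev` helper above (frequency weights with the unbiased correction `(Σw - 1)/Σw`); the input arrays are illustrative.

```python
import numpy as np

def weighted_stddev(data, weights):
    x = np.asarray(data, dtype=float)
    w = np.asarray(weights, dtype=float)
    bias = (w.sum() - 1.0) / w.sum()   # frequency-weight bias correction
    w = w / w.sum()                    # normalize weights
    wmean = np.sum(x * w)              # weighted average
    wvar = np.sum((x - wmean) ** 2 * w) / bias
    return np.sqrt(wvar)

print(weighted_stddev([1.0, 2.0, 4.0], [5.0, 3.0, 2.0]))
```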
(l)inear,(q)uad: \")\n fitType = 'l'\n fitTypes = [funcLinear, funcQuad]\n def fittingFunction(type):\n global fitTypePlot\n\n # If the type is linear, perform all linear-regression related procedures\n if type == \"l\":\n popt, pcov = curve_fit(funcLinear, grindSetting, data, maxfev=2000) # the regression\n perr = np.sqrt(np.diag(pcov)) # error in the regression\n ss_res = np.sum((data - funcLinear(grindSetting,*popt))**2)\n ss_tot = np.sum ((data-np.mean(data))**2)\n r_squared = 1 - (ss_res/ss_tot)\n plt.plot(grindSetting, funcLinear(grindSetting, *popt), label=\"Linear Fit\", color='green') # plots the regression against grind setting\n fitTypePlot = fitTypes[0]\n# plt.text(grindSetting[0],data[-1],r'$Equation\\ of\\ Linear\\ Fit: y={:.2}x +({:.2})$'.format(popt[0],popt[1])) # generate equation of fit on figure\n# plt.text(grindSetting[0],data[9],r'$R^2={:.2}$'.format(r_squared)) # generate equation of fit r^2 value on figure\n print()\n print(\"------------- Fit Parameters ------------\") \n print(\"\\n Slope = {:.2} +/- {:.2}\".format(popt[0],perr[1]))\n print(\"\\n Intercept = {:.2} +/- {:.2}mm\".format(popt[1],perr[0]))\n print(\"\\n R^2 = {:.2}\".format(r_squared))\n print()\n\n elif type == \"q\":\n popt, pcov = curve_fit(funcQuad, grindSetting, data, maxfev=2000)\n perr = np.sqrt(np.diag(pcov))\n plt.plot(grindSetting, funcQuad(grindSetting, *popt), label=\"Quadratic Fit\", color='green')\n plt.text(grindSetting[0],data[10],r'$Equation\\ of\\ Quadratic\\ Fit: y={:.2}x^2+{:.2}x+{:.2}$'.format(popt[0],popt[1],popt[2]))\n fitTypePlot = fitTypes[1]\n\n print()\n print(\"------------- Fit Parameters ------------\") \n print(\"\\n a: {:.2} +/- {:.2}\".format(popt[0],perr[0]))\n print(\"\\n b: {:.2} +/- {:.2}\".format(popt[1],perr[1]))\n print(\"\\n c: {:.2} +/- {:.2}\".format(popt[2],perr[2]))\n print()\n return popt, pcov, fitTypePlot\n fittingFunction(fitType)\n\n\n ##### Plotting #####\n# input(\"Press Enter To Continue To Plots....\")\n plt.title(\"{} vs. 
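The linear branch of `fittingFunction` above reduces to the following self-contained scipy sketch; synthetic data stands in for the grind measurements, and the R² computation is copied from the record.

```python
import numpy as np
from scipy.optimize import curve_fit

def func_linear(x, a, b):            # stand-in for the project's funcLinear
    return a * x + b

x = np.arange(1.0, 11.0)
y = 2.0 * x + 1.0 + 0.05 * np.sin(x)  # synthetic "measurements"
popt, pcov = curve_fit(func_linear, x, y)
perr = np.sqrt(np.diag(pcov))         # one-sigma parameter errors
ss_res = np.sum((y - func_linear(x, *popt)) ** 2)
ss_tot = np.sum((y - np.mean(y)) ** 2)
r_squared = 1.0 - ss_res / ss_tot
print(popt, perr, r_squared)          # slope ~2, intercept ~1, R^2 ~1
```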
Grind Setting\".format(pltTitle))\n plt.xlabel(\"Grind Setting\")\n plt.xlim([0,N+1])\n plt.xticks(ticks=grindSetting)\n plt.ylabel(\"{} [{}]\".format(pltTitle,units))\n\n plt.errorbar(grindSetting,data, fmt='o', color='black', ecolor='red', capsize=2, label='{}'.format(pltTitle), xerr=smallestIncrement/2, yerr=dataError)\n# plt.errorbar(grindSetting,data, fmt='o', color='black', ecolor='red', capsize=2, label='{}'.format(pltTitle), xerr=smallestIncrement/2, yerr=[stdDiamLower,stdDiamUpper])\n\n for i in range(0,len(avgDiam)):\n # Annotate the values for the errorbars on the graph, each for upper and lower.\n plt.annotate(data[i],(grindSetting[i]+.1*max(data),data[i]), color='black')\n plt.annotate(dataError[i],(grindSetting[i],dataError[i]+data[i]), color='red', label=\"error\")\n# plt.annotate(data[i],(grindSetting[i],\"{:.2}\".format(skewness[i])), color='red', label=\"error\")\n# plt.annotate(\"{:.2}\".format(stdDiamLower[i]),(grindSetting[i],stdDiamLower[i]-data[i]),color='purple',label='lower error')\n# plt.annotate(\"{:.2}\".format(stdDiamUpper[i]),(grindSetting[i],stdDiamUpper[i]+data[i]),color='orange',label='upper error')\n plt.legend()\n plt.savefig(\"{} Plot.png\".format(pltTitle), dpi=199)\n plt.show()\n\n", "sub_path": "grindSize/capressoInfinity/grinderAnalysis.py", "file_name": "grinderAnalysis.py", "file_ext": "py", "file_size_in_byte": 8777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.nansum", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.stats.describe", "line_number": 92, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 143, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}]} +{"seq_id": "222528979", "text": "from ast import literal_eval #\nfrom nltk.stem import PorterStemmer\nimport math\nimport os\nimport sys\nimport re\n\n# Tokenization class which has a tokenization function\nclass Tokenization:\n def __init__(self):\n self.tokens_list = []\n # This function takes a opened file as a input argument, and it tokenizes the file with several regular expressions.\n # Regex patterns are compiled for matching several patterns that occur in the file.\n def tokenization(self, file):\n\n date_rx = re.compile(r'((?:0[1-9]|[12][0-9]|3[01])[./-](?:(?:0?[1-9]|1[0-2])|(?:\\w+))[./-](?:(?:\\d{2})?\\d{2}))') # Regex for Date\n single_quotes_rx = re.compile(r'(?:^|\\s)\\'([^\\']*?)\\'(?:$|\\s)') # Regex for single Quotes\n hyphenated_rx = re.compile(r\"([\\w]+(?:\\n-[\\w]+)+)\") # Regex for hyphenated words\n name1_rx = re.compile(r\"([A-Z][\\w]+[.'\\s?](?:[A-Z]['.]\\s?)[A-Z][\\w]+(?:.[A-Z][\\w]+)?)\") # Regex for finding names\n cont_capital_rx = re.compile(r\"([A-Z][a-z]+[ ](?:[A-Z][a-z]+[ ]?)+)\") # Regex for matching continoous captial words\n 
acronyms_rx = re.compile(r\"((?:[A-Z]\\.)+(?:[A-Z]+))\") # Regex for acronyms\n contraction_rx = re.compile(r\"([\\w]+'[\\w]+)\") # regex for contraction words\n\n puncList = [\".\", \";\", \":\", \"!\", \"?\", \"/\", \"\\\\\", \",\", \"#\", \"@\", \"$\", \"&\", \")\", \"(\", \"\\\"\",'\\n','-','_']\n # punctuation list is pre defined for removing punctuation present in the list from the tokenized words\n\n # Empty lists are declared for storing the matched words in their coresponding list\n date_list = []\n single_quotes = []\n hyphenated_list = []\n name1_list = []\n cont_capital_list = []\n acronyms_list = []\n contraction_list = []\n\n # Getting IP address from the file\n if re.search(IP_rx,file):\n IP_address_list = re.findall(IP_rx,file) # all matched groups are stored in the list\n # print('IP',IP_address_list)\n for i in range(len(IP_address_list)):\n file = file.replace(IP_address_list[i], '') # after a group is matched, its removed from the file\n IP_address_list[i] = str(IP_address_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting Email patterns from the file\n if re.search(email_rx,file):\n email_list = re.findall(email_rx,file) # all matched groups are stored in the list\n # print('Email', email_list)\n for i in range(len(email_list)):\n file = file.replace(email_list[i], '') # after a group is matched, its removed from the file\n email_list[i] = str(email_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting Date patterns from the file\n if re.search(date_rx,file):\n date_list = re.findall(date_rx,file) # all matched groups are stored in the list\n # print('date', date_list)\n for i in range(len(date_list)):\n file = file.replace(date_list[i], '') # after a group is matched, its removed from the file\n date_list[i] = str(date_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting URL patterns from the file\n if re.search(URL_rx, file):\n URL_list = re.findall(URL_rx, file) # all matched groups are stored in the list\n for i in range(len(URL_list)):\n file = file.replace(URL_list[i], '') # after a group is matched, its removed from the file\n URL_list[i] = str(URL_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n no_removal = URL_list + date_list + email_list + IP_address_list\n # URL, Date, Email and IP adress list are combined as a single list\n\n # Getting Single quotes patterns from the file\n if re.search(single_quotes_rx,file):\n single_quotes = re.findall(single_quotes_rx,file) # all matched groups are stored in the list\n # print('Quotes', single_quotes)\n for i in range(len(single_quotes)):\n file = file.replace(single_quotes[i], '') # after a group is matched, its removed from the file\n single_quotes[i] = str(single_quotes[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting Hyphenated patterns from the file\n if re.search(hyphenated_rx,file):\n hyphenated_list = re.findall(hyphenated_rx,file) # all matched groups are stored in the list\n # print('hyphenated_rx',hyphenated_list)\n for i in range(len(hyphenated_list)):\n file = file.replace(hyphenated_list[i], '') # after a group is matched, its removed from the file\n hyphenated_list[i] = str(hyphenated_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip 
function is used for obtained string present in the list\n\n # Getting name patterns from the file\n if re.search(name1_rx,file):\n name1_list = re.findall(name1_rx,file) # all matched groups are stored in the list\n # print('name',name1_list)\n for i in range(len(name1_list)):\n file = file.replace(name1_list[i], '') # after a group is matched, its removed from the file\n name1_list[i] = str(name1_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting contraction word patterns from the file\n if re.search(contraction_rx,file):\n contraction_list = re.findall(contraction_rx,file) # all matched groups are stored in the list\n # print('contraction',contraction_list)\n for i in range(len(contraction_list)):\n file = file.replace(contraction_list[i], '') # after a group is matched, its removed from the file\n if '\\'s' in contraction_list[i]:\n contraction_list[i] = contraction_list[i].replace('\\'s','')\n contraction_list[i] = str(contraction_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting continous capital words patterns from the file\n if re.search(cont_capital_rx,file):\n cont_capital_list = re.findall(cont_capital_rx,file) # all matched groups are stored in the list\n # print('cont_capital',cont_capital_list)\n for i in range(len(cont_capital_list)):\n file = file.replace(cont_capital_list[i], '') # after a group is matched, its removed from the file\n cont_capital_list[i] = str(cont_capital_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting acronyms patterns from the file\n if re.search(acronyms_rx,file):\n acronyms_list = re.findall(acronyms_rx,file) # all matched groups are stored in the list\n # print('acronyms',acronyms_list)\n for i in range(len(acronyms_list)):\n file = file.replace(acronyms_list[i], '') # after a group is matched, its removed from the file\n acronyms_list[i] = str(acronyms_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n self.tokens_list = hyphenated_list + name1_list + cont_capital_list +acronyms_list + contraction_list + single_quotes\n # The tokens_list contains all tokens from hyphenated list, name list, cont_capital_list + acronyms List and from single quote list\n\n # The tokens_list is iterated for removing \\n whihc also has '-',\n # this case is occuring for hyphenated words that are matched from the file\n for i in range(len(self.tokens_list)):\n if '\\n' in self.tokens_list[i]:\n self.tokens_list[i] = self.tokens_list[i].replace('\\n','')\n if '-' in self.tokens_list[i]:\n self.tokens_list[i] = self.tokens_list[i].replace('-','')\n\n # All the tokens in tokens_list are iterated for punctuation removal\n for punct in range(len(puncList)):\n for word in range(len(self.tokens_list)):\n if puncList[punct] in self.tokens_list[word]:\n self.tokens_list[word] = self.tokens_list[word].replace(puncList[punct], '')\n\n # After all the regex patterns are obtained, rest of the file split stored in words_list\n words_list = file.split()\n # print('words',words_list)\n\n # Punctuation removal is done for the words_list\n for punct in range(len(puncList)):\n for word in range(len(words_list)):\n if puncList[punct] in words_list[word]:\n words_list[word] = words_list[word].replace(puncList[punct],'')\n\n # print('words_no_punc', words_list)\n words_filtered = []\n for each in 
range(len(words_list)):\n if words_list[each] != \"''\" and words_list[each] != '':\n words_filtered.append(words_list[each])\n\n\n # print('filtered',words_filtered)\n self.tokens_list = self.tokens_list + words_filtered + no_removal # the tokens_list is updated by combining all the list containing tokens\n return self.tokens_list # the final list is returned for a single file\n\nclass Stopword_removal:\n\n # In this class a function is defined for removing the stopwords from the tokens,\n # The stp_process function takes two arguments, tokens_list and stopword file.\n\n def __init__(self):\n self.final = []\n\n def stp_process(self, file, list):\n\n stopwords_file = open(file, 'r') # the stopwords file is opened\n stopwords_list = stopwords_file.readlines()\n for i in range(len(stopwords_list)):\n stopwords_list[i] = stopwords_list[i].replace('\\n', '')\n\n # the token list is iterated and if the token is not present in the stopwords the token is appened to a new list\n for j in range(len(list)):\n list[j] = list[j].lower()\n if list[j] not in stopwords_list:\n if len(list[j]) >= 3:\n self.final.append(list[j])\n return self.final\n\nclass Stemming:\n\n # In this class is defined for stemming process\n # A function is defined as stemming_process which takes a list as an input argument\n # Porter Stemmer is used from nltk package\n\n def __init__(self):\n self.stemmed = []\n\n def stemming_process(self,token_list):\n stemmer = PorterStemmer()\n for i in range(len(token_list)):\n self.stemmed.append(stemmer.stem(token_list[i]))\n return self.stemmed", "sub_path": "Info_extract.py", "file_name": "Info_extract.py", "file_ext": "py", "file_size_in_byte": 10874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.search", "line_number": 37, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "re.search", "line_number": 53, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 61, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "re.search", "line_number": 71, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 79, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 80, "usage_type": "call"}, {"api_name": "re.search", "line_number": 87, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 88, "usage_type": "call"}, {"api_name": "re.search", "line_number": 95, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 96, "usage_type": "call"}, {"api_name": "re.search", "line_number": 105, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 106, "usage_type": "call"}, {"api_name": "re.search", 
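The Stemming class in this record is a thin wrapper over NLTK's PorterStemmer; used directly it looks like this (requires the nltk package; the outputs shown are typical Porter results, not guaranteed verbatim).

```python
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
tokens = ["tokenization", "running", "studies"]
print([stemmer.stem(t) for t in tokens])  # typically ['token', 'run', 'studi']
```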
"line_number": 113, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 114, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "570563725", "text": "from lib import action\n\n\nclass ConsulCatalogServicesAction(action.ConsulBaseAction):\n def run(self,\n index=None,\n wait=None,\n consistency=None,\n dc=None,\n token=None):\n\n return (True, self.consul.catalog.services(\n index=index,\n wait=wait,\n consistency=consistency,\n dc=dc,\n token=token))\n", "sub_path": "actions/catalog_services.py", "file_name": "catalog_services.py", "file_ext": "py", "file_size_in_byte": 409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "lib.action.ConsulBaseAction", "line_number": 4, "usage_type": "attribute"}, {"api_name": "lib.action", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "159142535", "text": "# Copyright © 2020 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Retrieve the aliases for the entity.\"\"\"\nfrom http import HTTPStatus\n\nfrom flask import jsonify, request\nfrom flask_restx import Resource, cors\n\nfrom legal_api.models import Alias, Business\nfrom legal_api.utils.util import cors_preflight\n\nfrom .api_namespace import API\n\n\n@cors_preflight('GET,')\n@API.route('//aliases', methods=['GET', 'OPTIONS'])\n@API.route('//aliases/', methods=['GET', 'OPTIONS'])\nclass AliasResource(Resource):\n \"\"\"Business Aliases service.\"\"\"\n\n @staticmethod\n @cors.crossdomain(origin='*')\n def get(identifier, alias_id=None):\n \"\"\"Return a JSON of the aliases.\"\"\"\n business = Business.find_by_identifier(identifier)\n\n if not business:\n return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND\n\n # return the matching alias\n if alias_id:\n alias, msg, code = AliasResource._get_alias(business, alias_id)\n return jsonify(alias or msg), code\n\n aliases_list = []\n\n alias_type = request.args.get('type')\n if alias_type:\n aliases = Alias.find_by_type(business.id, alias_type.upper())\n else:\n aliases = business.aliases.all()\n\n for alias in aliases:\n alias_json = alias.json\n aliases_list.append(alias_json)\n\n return jsonify(aliases=aliases_list)\n\n @staticmethod\n def _get_alias(business, alias_id=None):\n # find by ID\n alias = None\n if alias_id:\n rv = Alias.find_by_id(alias_id=alias_id)\n if rv:\n alias = {'alias': rv.json}\n\n if not alias:\n return None, {'message': f'{business.identifier} alias not found'}, HTTPStatus.NOT_FOUND\n\n return alias, None, HTTPStatus.OK\n", "sub_path": "legal-api/src/legal_api/resources/v1/business/business_aliases.py", "file_name": "business_aliases.py", "file_ext": "py", "file_size_in_byte": 2405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask_restx.Resource", "line_number": 29, "usage_type": "name"}, {"api_name": 
"legal_api.models.Business.find_by_identifier", "line_number": 36, "usage_type": "call"}, {"api_name": "legal_api.models.Business", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 39, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "legal_api.models.Alias.find_by_type", "line_number": 50, "usage_type": "call"}, {"api_name": "legal_api.models.Alias", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 58, "usage_type": "call"}, {"api_name": "flask_restx.cors.crossdomain", "line_number": 33, "usage_type": "call"}, {"api_name": "flask_restx.cors", "line_number": 33, "usage_type": "name"}, {"api_name": "legal_api.models.Alias.find_by_id", "line_number": 65, "usage_type": "call"}, {"api_name": "legal_api.models.Alias", "line_number": 65, "usage_type": "name"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 70, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 70, "usage_type": "name"}, {"api_name": "http.HTTPStatus.OK", "line_number": 72, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 72, "usage_type": "name"}, {"api_name": "legal_api.utils.util.cors_preflight", "line_number": 26, "usage_type": "call"}, {"api_name": "api_namespace.API.route", "line_number": 27, "usage_type": "call"}, {"api_name": "api_namespace.API", "line_number": 27, "usage_type": "name"}, {"api_name": "api_namespace.API.route", "line_number": 28, "usage_type": "call"}, {"api_name": "api_namespace.API", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "176954048", "text": "import os\nimport glob\nimport cdms2\nimport cdutil\nimport numpy as np\nimport csv\nfrom varid_dict import varid_longname\nfrom utils import climo\n\ndef var_seasons(var, seasons):\n \"Calculate seasonal climatology of each variable\"\n var_season_data = np.empty([len(seasons)])*np.nan\n cdutil.setTimeBoundsMonthly(var)\n for k, season in enumerate(seasons):\n if season == 'ANN':\n months = cdutil.times.Seasons('DJFMAMJJASON')\n else:\n months = cdutil.times.Seasons(str(season))\n var_season_data[k] = months.climatology(var)\n # convert units\n if var.id == 'tas':\n var_season_data = var_season_data-273.15\n\n if var.id == 'pr':\n var_season_data = var_season_data*3600.*24.\n \n \n return var_season_data\n\n\ndef seasonal_mean_table(parameter):\n \"\"\"Calculate seasonal mean climatology\"\"\"\n variables = parameter.variables\n seasons = parameter.season\n test_path = parameter.test_data_path\n obs_path = parameter.obs_path\n cmip_path = parameter.cmip_path\n output_path = parameter.output_path\n sites = parameter.sites\n \n test_model = parameter.test_data_set \n ref_models = parameter.ref_models\n\n # Calculate for test model\n test_var_season=np.empty([len(variables),len(seasons)])*np.nan\n test_file = glob.glob(os.path.join(test_path,'*'+test_model+'*mo*'+ sites[0]+'.nc')) #read in monthly test data\n if len(test_file) == 0:\n raise RuntimeError('No monthly data for test model were found.')\n \n fin = cdms2.open(test_file[0])\n \n 
print('test_model',test_model)\n\n for j, variable in enumerate(variables): \n try:\n var = fin (variable)\n #test_var_season[j, :] = var_seasons(var, seasons)\n test_var_season[j, :] = climo(var, seasons)\n\n except:\n print(variable+\" not processed for \" + test_model)\n fin.close()\n\n # Calculate for observational data\n obs_var_season=np.empty([len(variables),len(seasons)])*np.nan\n print('ARM data')\n if sites[0] == 'sgp':\n obs_file = glob.glob(os.path.join(obs_path,'*ARMdiag*monthly_stat_'+ sites[0]+'.nc')) #read in monthly test data\n fin = cdms2.open(obs_file[0])\n for j, variable in enumerate(variables): \n try:\n var = fin (variable)\n #obs_var_season[j, :] = var_seasons(var, seasons)\n obs_var_season[j, :] = climo(var, seasons)\n \n except:\n print(variable+\" not processed for obs\")\n fin.close()\n else:\n obs_file = glob.glob(os.path.join(obs_path,'*ARMdiag*monthly_climo*'+ sites[0]+'.nc')) #read in monthly test data\n fin = cdms2.open(obs_file[0]) \n for j, variable in enumerate(variables): \n try:\n var = fin (variable) \n \n #tmp\n obs_var_season[j,1:] = np.nanmean(np.reshape(var, (4,3)),axis=1)\n if variable == 'tas':\n obs_var_season[j,1:] = obs_var_season[j,1:] -273.15\n if variable == 'pr':\n obs_var_season[j,1:] = obs_var_season[j,1:] * 24.0\n if variable == 'prw':\n obs_var_season[j,1:] = obs_var_season[j,1:] * 10.0\n obs_var_season[j,0] = np.nanmean(obs_var_season[j,1:])\n \n #var24 = np.concatenate((var,var),axis=0)\n \n except:\n print(variable+\" not processed for obs\")\n fin.close() \n \n \n \n # Calculate cmip model seasonal mean climatology\n cmip_var_season=np.empty([len(ref_models),len(variables),len(seasons)])*np.nan\n \n for i, ref_model in enumerate(ref_models):\n ref_file = glob.glob(os.path.join(cmip_path,'*'+ref_model+'*mo*'+ sites[0]+'.nc')) #read in monthly cmip data\n print('ref_model', ref_model)\n if not ref_file :\n print(ref_model+\" not found!\") \n else:\n fin = cdms2.open(ref_file[0])\n \n for j, variable in enumerate(variables): \n try:\n var = fin (variable)\n #cmip_var_season[i, j, :] = var_seasons(var, seasons)\n cmip_var_season[i, j, :] = climo(var, seasons)\n\n except:\n print(variable+\" not processed for \" + ref_model)\n fin.close() \n # Calculate multi-model mean\n mmm_var_season = np.nanmean(cmip_var_season,axis=0)\n \n\n # Save data as a table\n #header=['Variables','Model','Obs','Model-Obs','CMIP5','RMSE']\n header=['Variables','Model','Obs','Model-Obs','CMIP5']\n var_longname = [ varid_longname[x] for x in variables]\n table_data = np.empty([len(variables),len(seasons),4])\n\n for k, season in enumerate(seasons):\n for j, variable in enumerate(variables):\n table_data[j,k,:] = (round(test_var_season[j,k],3), round(obs_var_season[j,k],3),round(test_var_season[j,k]-obs_var_season[j,k],3),round(mmm_var_season[j,k],3))\n \n with open (output_path+'/metrics/seasonal_mean_table_'+season+'_'+sites[0]+'.csv','w') as f1:\n writer=csv.writer(f1, delimiter=',',lineterminator='\\n', quoting=csv.QUOTE_NONE)\n writer.writerow(header)\n #use tuple to generate csv \n writer.writerows([c]+row.tolist() for c, row in zip(var_longname,table_data[:,k,:]))\n\n \n \n \n \n \n", "sub_path": "arm_diags/src/seasonal_mean.py", "file_name": "seasonal_mean.py", "file_ext": "py", "file_size_in_byte": 5431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.empty", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 12, "usage_type": "attribute"}, 
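The observational branch above converts a 12-value monthly climatology into four seasonal means with a single reshape; a standalone illustration, assuming (as the record does) that the 12 months are already ordered so each consecutive triple forms one season.

```python
import numpy as np

monthly = np.arange(12, dtype=float)               # stand-in monthly climatology
seasonal = np.nanmean(monthly.reshape(4, 3), axis=1)
annual = np.nanmean(seasonal)                      # mean of the four seasons
print(seasonal, annual)                            # [1. 4. 7. 10.] 5.5
```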
{"api_name": "cdutil.setTimeBoundsMonthly", "line_number": 13, "usage_type": "call"}, {"api_name": "cdutil.times.Seasons", "line_number": 16, "usage_type": "call"}, {"api_name": "cdutil.times", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cdutil.times.Seasons", "line_number": 18, "usage_type": "call"}, {"api_name": "cdutil.times", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 45, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.climo", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 65, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.climo", "line_number": 74, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 105, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.climo", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 125, "usage_type": "call"}, {"api_name": "varid_dict.varid_longname", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 132, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 139, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONE", "line_number": 139, "usage_type": "attribute"}]} +{"seq_id": "530099107", "text": "import json\n\nfrom django.utils.cache import add_never_cache_headers\nfrom django.views.generic import View\nfrom django.db import DatabaseError\nfrom django.core.exceptions import ValidationError\n\nfrom settings import DEBUG\nfrom .ApiResponse import ApiResponse\nfrom ...UserManager import UserManager\nfrom ...models import (ProProject, HisHistory, UsrUser,\n TskTask, MemMember, SchSchedule,\n AssAssignation)\n\n\nclass Global(View):\n\n def dispatch(self, request, *args, **kwargs):\n\n try:\n self.User = UserManager.Get(request.COOKIES.get('token', None))\n self.Token = request.COOKIES.get('token', None)\n\n if request.method.lower() in ['post', 'put']:\n self.data = json.loads(request.body.decode())\n\n if ('id' in kwargs and\n kwargs['id'] == 'me' 
and\n self.User is not None):\n\n kwargs['id'] = self.User.id\n\n response = super(Global, self).dispatch(request,\n *args,\n **kwargs)\n\n except json.decoder.JSONDecodeError as e:\n response = ApiResponse.Generate400('JSON Error: ' + str(e))\n\n except UsrUser.DoesNotExist as e:\n response = ApiResponse.Generate404('User not found')\n\n except TskTask.DoesNotExist as e:\n response = ApiResponse.Generate404('Task not found')\n\n except ProProject.DoesNotExist as e:\n response = ApiResponse.Generate404('Project not found')\n\n except MemMember.DoesNotExist as e:\n response = ApiResponse.Generate404('Members not found')\n\n except SchSchedule.DoesNotExist as e:\n response = ApiResponse.Generate404('Schedule not found')\n\n except HisHistory.DoesNotExist as e:\n response = ApiResponse.Generate404('Schedule not found')\n\n except AssAssignation.DoesNotExist as e:\n response = ApiResponse.Generate404('Assignation not found')\n\n except KeyError as e:\n response = ApiResponse.Generate422('Missing: ' + str(e))\n\n except TypeError as e:\n response = ApiResponse.Generate422('Invalid Data')\n\n except ValidationError as e:\n response = ApiResponse.Generate422('Invalid Data')\n\n except DatabaseError as e:\n if DEBUG:\n response = ApiResponse.Generate422('Database Error: ' + str(e))\n else:\n response = ApiResponse.Generate422('Invalid Data')\n\n except BaseException as e:\n response = ApiResponse.Generate500(e)\n\n if 'HTTP_ORIGIN' in request.META:\n response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']\n else:\n response['Access-Control-Allow-Origin'] = '*'\n\n response['Access-Control-Allow-Methods'] = 'GET,POST,PUT,DELETE'\n response['Access-Control-Allow-Headers'] = 'Content-Type'\n response['Access-Control-Allow-Credentials'] = 'true'\n\n add_never_cache_headers(response)\n\n return response\n", "sub_path": "Focus/api/v1/Global.py", "file_name": "Global.py", "file_ext": "py", "file_size_in_byte": 3140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.views.generic.View", "line_number": 16, "usage_type": "name"}, {"api_name": "UserManager.UserManager.Get", "line_number": 21, "usage_type": "call"}, {"api_name": "UserManager.UserManager", "line_number": 21, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ApiResponse.ApiResponse.Generate400", "line_number": 38, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 38, "usage_type": "name"}, {"api_name": "models.UsrUser.DoesNotExist", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.UsrUser", "line_number": 40, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 41, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 41, "usage_type": "name"}, {"api_name": "models.TskTask.DoesNotExist", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.TskTask", "line_number": 43, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 44, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 44, "usage_type": "name"}, {"api_name": "models.ProProject.DoesNotExist", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.ProProject", "line_number": 46, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 47, 
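The dispatch override in the Global view above centralizes error mapping. Stripped of Django, the pattern reduces to this sketch; the exception and handler names here are stand-ins for the project's own, not its actual API.

```python
class NotFound(Exception):
    pass

def dispatch(handler, *args):
    # map failures to (status, body) pairs, like the view's except chain
    try:
        return 200, handler(*args)
    except NotFound as exc:
        return 404, {"message": str(exc)}
    except (KeyError, TypeError, ValueError) as exc:
        return 422, {"message": "Invalid data: {}".format(exc)}
    except Exception as exc:
        return 500, {"message": str(exc)}

def find_user(user_id):
    raise NotFound("User not found")

print(dispatch(find_user, 42))   # -> (404, {'message': 'User not found'})
```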
"usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 47, "usage_type": "name"}, {"api_name": "models.MemMember.DoesNotExist", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.MemMember", "line_number": 49, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 50, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 50, "usage_type": "name"}, {"api_name": "models.SchSchedule.DoesNotExist", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.SchSchedule", "line_number": 52, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 53, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 53, "usage_type": "name"}, {"api_name": "models.HisHistory.DoesNotExist", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.HisHistory", "line_number": 55, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 56, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 56, "usage_type": "name"}, {"api_name": "models.AssAssignation.DoesNotExist", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.AssAssignation", "line_number": 58, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 59, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 59, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 62, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 62, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 65, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 65, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 67, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 68, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.DatabaseError", "line_number": 70, "usage_type": "name"}, {"api_name": "settings.DEBUG", "line_number": 71, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 72, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 72, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 74, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 74, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate500", "line_number": 77, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 77, "usage_type": "name"}, {"api_name": "django.utils.cache.add_never_cache_headers", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "461766479", "text": "#!/usr/bin/python3\nimport pyautogui\nfrom pytube import YouTube\nfrom colorama import Fore, Back, Style\nimport os\nimport time\nimport pyttsx3\nimport datetime\nspeak = pyttsx3.init()\ne = ['q','Q','EXIT','QUIT','quit','exit']\npassword = []\nuser = []\nwhile True:\n os.system(\"clear\")\n print(Fore.RED+\"\"\"\n ____ _______\n | ___| | _____|\n _| |_ | |_____\n |_ _| R | _____| E \n | | | |______ \n |_| |_______|\n \"\"\")\n time.sleep(0.7)\n o = input(Fore.GREEN+\"\"\"\n __________OFFICIAL-CODE_______\n | |\n | [1] CREAT FREE |\n | |\n | [2] LOGIN |\n |______________________________|\n 
\n>>>>>>>>>>>\"\"\");\n    if o == \"1\" or o == \"CREAT FREE\" or o == \"creat free\":\n        os.system(\"clear\")\n        p = input(Fore.YELLOW+\"\"\"\n    ----------------------------------------------\n   |    GMAIL : \"\"\")\n        j = input(Fore.YELLOW+\"\"\"   |_____________________________________________\n   |    PASSWORD : \"\"\")\n        print(\"   |_____________________________________________\")\n        ji = \"  Thank YOU for creating a FREE account\"\n        po =f\"\"\"\n    ______________________contact__________________\n   <<<<<<<<<<<<<|                               |>>>>>>>>>>>>\n         GMAIL : {p}   \n         PASSWORD : {j}  \n   <<<<<<<<<<<<<|_______________________________________________|>>>>>>>>>>>\n     \"\"\"\n        if p == \"\":\n            print (\" Gmail --false--\\n\")\n            time.sleep(2)\n        elif j == \"\":\n            print (\" password --false--\\n\")\n            time.sleep(2)\n        else: \n            print (Fore.GREEN+po)\n            print (Fore.RED+ji)\n            user.append(p)\n            password.append(j)\n\n            os.system(\"clear\")\n    elif o == \"2\" or o == \"LOGIN\" or o == \"login\":\n        os.system(\"clear\")\n        s = input(\"\"\"\n    ----------------------------------------------\n   |    GMAIL : \"\"\")\n        l = input(\"\"\"   |_____________________________________________\n   |    PASSWORD : \"\"\")\n        print(\"   |_____________________________________________\")\n        if s in user and password[user.index(s)] == l:\n            os.system(\"clear\")\n            print(Fore.RED+\"ROBOT: hi\")\n            lw = Fore.RED+\"ROBOT: OK MY FRIEND\"\n            wl = Fore.RED+\"ROBOT : OK SIR\"\n            while True:\n                me = input(Fore.WHITE+\"me: \")\n                speak.say(me)\n                if me == \"open terminal\": \n                    print(lw)\n                    os.system(\"gnome-terminal\")\n                    speak.say(\"ok my friend\")\n                elif me == \"open firefox\":\n                    print(lw)\n                    speak.say(\"ok my friend\")\n                    os.system(\"firefox\")\n                elif me == \"hi\" or me == \"hello\":\n                    print(Fore.RED+\"ROBOT: DO YOU HELP ?\")\n                    speak.say(\"do you help\")\n                elif me == \"yes\" or me == \"y\":\n                    print(Fore.WHITE + \"\"\"ROBOT:\nFACEBOOK \nINSTAGRAM\nYOUTUBE\nCLOCK\nHACKING\nGOOGLE\nterminal\nmy ip\nsend burpforce\ncreate file\n:\"\"\")\n                    speak.say(\"facebook instagram youtube clock hacking google terminal my i p address send burpforce create file\")\n                elif me == \"MY IP\" or me == \"my ip\":\n                    print(lw)\n                    os.system(\"ifconfig\")\n                    speak.say(\"ok my friend\")\n                elif me == \"facebook\" or me == \"FACEBOOK\":\n                    print (wl)\n                    os.system(\"xdg-open https://www.facebook.com\")\n                    speak.say(\"ok sir\")\n                elif me == \"INSTAGRAM\" or me == \"instagram\":\n                    print (Fore.RED+\"ROBOT : OK OPEN INSTAGRAM\")\n                    os.system(\"xdg-open https://www.instagram.com\")\n                    speak.say(\"ok sir\")\n                elif me == \"YOUTUBE\" or me == \"youtube\":\n                    print (\"ROBOT : OK SIR\")\n                    speak.say(\"ok sir\")\n                    os.system(\"xdg-open https://www.youtube.com\")\n                elif me == \"clock\":\n                    print (wl)\n                    speak.say(\"ok sir\")\n                    date = datetime.datetime.now()\n                    speak.say(date)\n                    print(date)\n                elif me == \"hacking\":\n                    print (wl)\n                    speak.say(\"ok sir\")\n                    os.system(\"xdg-open https://www.blackhat.com\")\n                elif me in e:\n                    speak.say(\"ok exit\")\n                    print(Fore.RED+\"ROBOT:EXIT NOW\")\n                    exit()\n                elif me == \"create file\": \n                    print (Fore.RED+\"ROBOT : OK CREATE FILE\")\n                    speak.say(\"ok create file thank you \")\n                    speak.runAndWait()\n                    o = input(\"name file to create:\")\n                    f = open(o,\"w\")\n                    f.write(\"Editor : Official-coDe\")\n                    f.close()\n                elif me == \"terminal\": \n                    while True:\n                        print (lw)\n                        speak.say(\"ok my friend\")\n                        speak.runAndWait()\n                        i = input(\"command: \")\n                        speak.say(i)\n                        speak.runAndWait()\n                        os.system(i)\n                        if i == \"back\":\n                            print(Fore.RED+\"ROBOT : OK BACK\")\n                            speak.say(\"back\")\n                            speak.runAndWait() \n                            break\n                elif me == \"google\":\n                    os.system(\"clear\")\n                    print(\"GooGlE\")\n                    
while True:\n                        w = input(Fore.RED+\"\"\"\n>>>>>>>>search:\"\"\")\n                        if w == \"back\":\n                            print(\"ROBOT : OK BACK\")\n                            speak.say(\"back\") \n                            speak.runAndWait()\n                            break\n                        else:\n                            os.system(\"xdg-open https://www.google.com/search?q=\"+w) \n                elif me == \"send burpforce\":\n                    print (Fore.RED+\"ROBOT : Brute force attack\")\n                    speak.say('brute force attack')\n                    speak.runAndWait()\n                    ui = int(input(\"time:\"))\n                    iu = input(\"text:\")\n                    while True:\n                        time.sleep(ui)\n                        pyautogui.typewrite(iu)\n                        pyautogui.press('enter')\n                        speak.runAndWait()\n        else:\n            print (\"Gmail or password false\\n\")\n            time.sleep(2)\n    elif o in e:\n        os.system(\"clear\")\n        exit()\n    else :\n        print (\"what is {}\".format(o))\n        time.sleep(1)\n", "sub_path": "speak.py", "file_name": "speak.py", "file_ext": "py", "file_size_in_byte": 5890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pyttsx3.init", "line_number": 9, "usage_type": "call"}, {"api_name": "os.system", "line_number": 14, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 15, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 24, "usage_type": "name"}, {"api_name": "os.system", "line_number": 34, "usage_type": "call"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 35, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 35, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 38, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 56, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 56, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 57, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 57, "usage_type": "name"}, {"api_name": "os.system", "line_number": 61, "usage_type": "call"}, {"api_name": "os.system", "line_number": 63, "usage_type": "call"}, {"api_name": "os.system", "line_number": 71, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 72, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 72, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 73, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 73, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 74, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 74, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 76, "usage_type": "name"}, {"api_name": "os.system", "line_number": 80, "usage_type": "call"}, {"api_name": "os.system", "line_number": 85, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 87, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 87, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 90, "usage_type": "name"}, {"api_name": "os.system", "line_number": 
105, "usage_type": "call"}, {"api_name": "os.system", "line_number": 109, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 112, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 112, "usage_type": "name"}, {"api_name": "os.system", "line_number": 113, "usage_type": "call"}, {"api_name": "os.system", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 128, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 131, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 131, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 134, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 134, "usage_type": "name"}, {"api_name": "os.system", "line_number": 149, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 152, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 152, "usage_type": "name"}, {"api_name": "os.system", "line_number": 156, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 159, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 159, "usage_type": "name"}, {"api_name": "os.system", "line_number": 167, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 169, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 169, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 175, "usage_type": "call"}, {"api_name": "pyautogui.typewrite", "line_number": 176, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 177, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 181, "usage_type": "call"}, {"api_name": "os.system", "line_number": 184, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "151402731", "text": "import csv # lib csv for excel\nimport serial # lib serial for read the port usb Arduino uno\nfrom datetime import datetime\nimport os\nimport datetime \nimport time\n\n\nser = serial.Serial(\"COM3\", 9600) # open serial port\nser.flushInput() # input data Arduino uno\ndef createfile():\n current_date_and_time = datetime.datetime.now().strftime('%Y-%m-%d')\n current_date_and_time_string = str(current_date_and_time)\n file_name = current_date_and_time_string + \".csv\"\n if not os.path.isfile(file_name):\n with open(\"file_name.csv\", \"a\", newline='') as f: # create and open the file csv\n writer = csv.writer(f, delimiter=\",\") # Split the string, using comma, followed by a space, as a separator\n writer.writerow(\n [\"date\", \"Time\", \"Thermostat\", \"Temperature\", \"Humidity\"]) # titles for columns file excel\n\n f.close() # close file test_file.csv\n else:\n pass\n\nwhile True:\n createfile()\n \n ser_bytes = ser.readline().decode().strip().split(',') # use split(',') to seperate ser_byte string to list\n new_ser_bytes = [float(i) for i in ser_bytes] # using list comprehension to perform conversion to float\n t = time.localtime() # time location\n decoded_time1 = time.strftime('%Y-%m-%d', t) # for date\n decoded_time2 = time.strftime('%H:%M:%S', t) # for time\n\n print(decoded_time1, decoded_time2, ser_bytes) # print date , time, and data Arduino uno\n with open(\"file_name.csv\", \"a\", newline='') as f: # create and open the file csv\n writer = 
csv.writer(f, delimiter=\",\") # Split the string, using comma, followed by a space, as a separator\n        writer.writerow([decoded_time1, decoded_time2, new_ser_bytes[0], new_ser_bytes[1], new_ser_bytes[2]]) # writerow with separate date, time and the converted data values\n        f.close() # close file test_file.csv\n    # 1h for sleep\n    time.sleep(3600)", "sub_path": "create_csv.py", "file_name": "create_csv.py", "file_ext": "py", "file_size_in_byte": 1884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "serial.Serial", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 17, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 30, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 32, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "499103376", "text": "from typing import List\n\nfrom utils.misc import UnionFind\n\n\nclass Solution:\n    def numIslands(self, grid: List[List[str]]) -> int:\n        if not grid or not grid[0]:\n            return 0\n        h, w = len(grid), len(grid[0])\n        uf = UnionFind([(r, c) for r in range(h) for c in range(w) if grid[r][c] == '1'])\n        for r in range(h):\n            for c in range(w):\n                if grid[r][c] == '1':\n                    for y, x in [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]:\n                        if 0 <= y < h and 0 <= x < w and grid[y][x] == '1':\n                            uf.union((r, c), (y, x))\n\n        return len(uf.size)\n", "sub_path": "week10/number_of_islands.py", "file_name": "number_of_islands.py", "file_ext": "py", "file_size_in_byte": 654, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "utils.misc.UnionFind", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "89124690", "text": "from models.state import State\nfrom models.city import City\nfrom flask import Flask, render_template\nfrom models import storage\n\n\n# create a Flask application object\napp = Flask(__name__)\n\n\n@app.route('/states', strict_slashes=False)\n@app.route('/states/<id>', strict_slashes=False)\ndef states_by_id(id=\"\"):\n    states = storage.all(cls=State)\n    cities = storage.all(cls=City)\n    return render_template('9-states.html',\n                           states=states.values(),\n                           cities=cities.values(), id=id)\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host='0.0.0.0', port='5000')\n", "sub_path": "web_flask/9-states.py", "file_name": "9-states.py", "file_ext": "py", "file_size_in_byte": 614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "models.storage.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 14, "usage_type": "name"}, {"api_name": "models.state.State", "line_number": 14, "usage_type": "name"}, {"api_name": "models.storage.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 15, "usage_type": 
"name"}, {"api_name": "models.city.City", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "421084687", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom tagging.registry import register\nfrom tagging.fields import TagField\nfrom tagging.registry import register\nfrom .search import NoteIndex\n\n\nclass EmailServices(models.Model):\n hash = models.CharField(max_length=32)\n status = models.BooleanField(default=False)\n user = models.OneToOneField(User, on_delete=models.CASCADE, unique=True, null=True)\n\n class Meta:\n indexes = [\n models.Index(fields=['hash'],)\n ]\n\n\nclass Note(models.Model):\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n pub_date = models.DateTimeField(auto_now_add=True)\n last_edited_date = models.DateTimeField(auto_now=True)\n subject = models.CharField(max_length=64)\n spec_code = models.CharField(max_length=64)\n short_content = models.TextField(default='')\n content = models.TextField(default='')\n tags = TagField()\n\n def __str__(self):\n return '[' + str(self.pub_date) + '] ' + str(self.spec_code) + ' - ' + str(self.subject)\n\n def indexing(self):\n obj = NoteIndex(\n meta={'id': self.id},\n subject=self.subject,\n posted_date=self.pub_date,\n edited_date=self.last_edited_date,\n spec_code=self.spec_code,\n short_content=self.short_content,\n full_content=self.content,\n )\n obj.save()\n return obj.to_dict(include_meta=True)\n\n class Meta:\n indexes = [\n models.Index(fields=['spec_code']),\n models.Index(fields=['pub_date']),\n models.Index(fields=['last_edited_date'])\n ]\n\n\nclass Comment(models.Model):\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n note = models.ForeignKey(Note, on_delete=models.CASCADE)\n pub_date = models.DateTimeField(auto_now_add=True)\n parent_id = models.IntegerField(default=-1)\n content = models.TextField(max_length=1024)\n\n class Meta:\n indexes = [\n models.Index(fields=['author']),\n models.Index(fields=['pub_date']),\n models.Index(fields=['parent_id']),\n ]\n\n\nclass Rate(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n note = models.ForeignKey(Note, on_delete=models.CASCADE)\n mark = models.IntegerField()\n\n class Meta:\n indexes = [\n models.Index(fields=['user']),\n models.Index(fields=['note']),\n ]\n unique_together = ('user', 'note'),\n\n\nclass Like(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n\n class Meta:\n indexes = [\n models.Index(fields=['user']),\n models.Index(fields=['comment']),\n ]\n unique_together = ('user', 'comment'),\n", "sub_path": "serana/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 12, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.models.User", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models.Index", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "tagging.fields.TagField", "line_number": 28, "usage_type": "call"}, {"api_name": "search.NoteIndex", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models.Index", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, 
"usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 70, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 83, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.db.models.Index", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "452669519", "text": "#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.keras.models import load_model\n\nimport datetime\nimport argparse\nimport os\nimport sys\nimport time\n\nsys.path.insert(0, '../src')\nimport dataset_manager as data_manager\nimport neural_network as nn\n\n\nTEST_PATH = \"../data/\"\nMODEL_PATH = '../models/'\nPLOT_PATH = 'plots/'\nMODEL_NAME = 'model.hdf5'\nLOG_NAME = \"signal_process.log\"\nHISTORY_NAME = \"history.png\"\nTHRESHOLD = 0.2\n\n\ndef get_directory_name():\n return 'model_{0:%Y-%m-%d_%H:%M:%S}/' \\\n 
.format(datetime.datetime.now())\n\n\ndef save_history(history, filename):\n    plt.figure()\n    plt.plot(history.history['mean_squared_error'])\n    plt.plot(history.history['val_mean_squared_error'])\n    plt.title('Model accuracy')\n    plt.ylabel('Mean square error')\n    plt.xlabel('Epoch')\n    plt.legend(['train', 'test'], loc='upper left')\n    plt.savefig(filename)\n\n\ndef save_log(window_size, architecture, test_eval, precision, exec_time, filename):\n    with open(filename, \"w\") as f:\n        f.write(\"WINDOW_SIZE:{}\\n\".format(window_size))\n        f.write(\"ARCHITECTURE:{}\\n\".format(architecture))\n        f.write(\"EXEC_TIME:{}\\n\".format(exec_time))\n        f.write(\"TEST_EVAL:{}\\n\".format(test_eval))\n        f.write(\"PRECISION:{}\\n\".format(precision))\n\n\ndef save_results(results, plot, filename):\n    if plot:\n        os.makedirs(PLOT_PATH, exist_ok=True)\n\n    with open(filename, \"w\") as f:\n        for sequence, prediction, is_signal in results:\n            if plot:\n                plt.figure()\n                plt.plot(range(len(prediction)), prediction)\n                plt.savefig(PLOT_PATH + sequence.name + \"-fig.png\")\n\n            f.write('{}:{}\\n'.format(sequence.name, is_signal))\n\n\ndef loading_screen(i, length):\n    print(\"Processed: {}/{}\".format(i, length), file=sys.stderr)\n\n\ndef train_action(window_size, hidden_layers):\n    print(\"Fetching data...\", file=sys.stderr)\n    data = data_manager.fetch_protein_data(\"../data/training_data\")\n    print(\"Done! Preparing for training...\", file=sys.stderr)\n    input_data, signal_data = data_manager.prepare_dataset(data, window_size, loading_screen)\n    print(\"Done! Train and test splitting...\", file=sys.stderr)\n    train_input, test_input = data_manager.train_test_split(input_data, signal_data)\n    print(\"Done\", file=sys.stderr)\n\n    print(\"Building signal model\", file=sys.stderr)\n\n    # Input layer\n    architecture = [input_data[0].shape[0]]\n    # Hidden layer\n    architecture.extend(hidden_layers)\n    # Output layer\n    architecture.append(2)\n\n    model = nn.build_model(architecture)\n\n    directory = MODEL_PATH + get_directory_name()\n    try:\n        os.makedirs(directory)\n    except OSError:\n        print(\"There was an error while creating model's sub-directory.\", file=sys.stderr)\n        exit(1)\n\n    start_time = time.time()\n    history = nn.train_model(model, train_input[0], train_input[1],\n                             filename=directory + MODEL_NAME)\n    exec_time = time.time() - start_time\n\n    test_eval = model.evaluate(test_input[0], test_input[1])\n\n    results, precision = test_sequences(data, model, window_size, eval=True)\n    save_history(history, directory + HISTORY_NAME)\n    save_log(window_size, architecture, test_eval, precision, exec_time, directory + LOG_NAME)\n    print(\"Done.\", file=sys.stderr)\n\n\ndef predict_signal(result):\n    index = None\n    signals = result[:, 0]\n    cleavage = result[:, 1]\n    for i, c in enumerate(cleavage):\n        if c > THRESHOLD:\n            index = i\n            break\n    if not index: return False  # index is None (no cleavage site) or 0 (empty signal region)\n\n    signals = signals[:index]\n    maximum = len(signals)\n    count = 0\n    for s in signals:\n        if s > THRESHOLD:\n            count += 1\n\n    return count / maximum > 0.9\n\n\ndef test_sequences(sequences, model, window_size, eval=False):\n    results = []\n    count = 0\n    count_signal = 0\n    for s in sequences:\n        result = model.predict(data_manager.prepare_example(s, window_size))\n        is_signal = predict_signal(result)\n        results.append((s, result, is_signal))\n        if eval:\n            if (s.label == 1 and is_signal == True) or (s.label == -1 and is_signal == False): count += 1\n\n        if is_signal: count_signal += 1\n        print(\"{}:{}\".format(s.name, is_signal), file=sys.stderr)\n\n    print(\"{}:{}/{}\".format(len(results), count_signal, len(results) - 
count_signal), file=sys.stderr)\n    if not eval: return results\n    else: return results, count/len(results)\n\n\ndef test_action(window_size, model_dir, test_file, output_file, plot):\n    model = None\n    try:\n        model = load_model(MODEL_PATH + model_dir + '/' + MODEL_NAME)\n    except Exception:\n        print(\"No such model! Please specify model with -m flag.\", file=sys.stderr)\n        exit(1)\n\n    sequences = []\n    try:\n        sequences = data_manager.get_file_sequences(TEST_PATH + test_file)\n    except Exception:\n        print(\"No testing data, please put it in /data folder.\", file=sys.stderr)\n        exit(1)\n\n    try:\n        results = test_sequences(sequences, model, window_size)\n\n\n    except Exception:\n        print(\"Model was built with different window size.\", file=sys.stderr)\n        exit(1)\n\n    save_results(results, plot, output_file)\n\n\ndef evaluate_action(window_size, model_dir):\n    data = data_manager.fetch_protein_data(\"../data/training_data\")\n    results, precision = test_sequences(data, load_model(MODEL_PATH + model_dir + '/' + MODEL_NAME), window_size,\n
Provide only hidden layers, for example: -a 8 4 will yield\"\n \"architecture INPUTx8x4xOUTPUT.\")\n\n args = parser.parse_args()\n\n if args.train:\n train_action(args.window_size, args.architecture)\n\n if args.test:\n test_action(args.window_size, args.model, args.test_file, args.output, args.plot)\n\n if args.evaluate:\n evaluate_action(args.window_size, args.model)\n", "sub_path": "src/peptide_predictor.py", "file_name": "peptide_predictor.py", "file_ext": "py", "file_size_in_byte": 7269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 69, "usage_type": "attribute"}, {"api_name": "dataset_manager.fetch_protein_data", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 71, "usage_type": "attribute"}, {"api_name": "dataset_manager.prepare_dataset", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 73, "usage_type": "attribute"}, {"api_name": "dataset_manager.train_test_split", "line_number": 74, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 77, "usage_type": "attribute"}, {"api_name": "neural_network.build_model", "line_number": 86, "usage_type": "call"}, {"api_name": 
"os.makedirs", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 92, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "neural_network.train_model", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 105, "usage_type": "attribute"}, {"api_name": "dataset_manager.prepare_example", "line_number": 133, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 150, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 152, "usage_type": "attribute"}, {"api_name": "dataset_manager.get_file_sequences", "line_number": 157, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 167, "usage_type": "attribute"}, {"api_name": "dataset_manager.fetch_protein_data", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 175, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "234746624", "text": "import numpy as np\nimport pdb\nfrom serial import Serial\nimport cv2\nimport time\nimport math\n\nfrom Image import *\nfrom Utils import *\n\nser = Serial('/dev/ttyACM0', 4800)\nfont = cv2.FONT_HERSHEY_SIMPLEX\ndirection = 0\nImages=[]\nN_SLICES = 4\n\nfor q in range(N_SLICES):\n Images.append(Image())\n\ncapture = cv2.VideoCapture(1) # read the video stream\n# capture.set(3, 320.0) # set the width\n# capture.set(4, 240.0) # set the height\n# capture.set(5, 15) # set the frame rate\ncv2.namedWindow('frame', cv2.WINDOW_FULLSCREEN)\n\nit = 1\n\nwhile cv2.waitKey(1) & 0xff != ord('q'):\n \n flag, img = capture.read()\n # img = cv2.imread('img4.jpg')\n direction = 0\n img = RemoveBackground(img, False)\n if img is not None:\n t1 = time.clock()\n SlicePart(img, Images, N_SLICES)\n for i in range(N_SLICES):\n direction += Images[i].dir\n \n #negative error: right of middle\n #positive error: left of middle\n error1 = Images[0].dir #\n error2 = Images[1].dir \n error3 = Images[2].dir\n error4 = Images[3].dir #error of furthest part of line\n \n slope = (Images[3].x_coord - Images[0].x_coord)/180.0\n theta = round(math.degrees(math.atan(slope)), 2)\n\n fm = RepackImages(Images)\n t2 = time.clock()\n cv2.putText(fm,\"Time: \" + str((t2-t1)*1000) + \" ms\", (10, 470), font, 0.5, (0,0,255), 1, cv2.LINE_AA)\n \n # for i in range(N_SLICES):\n # cv2.imshow(\"part %d\" % i, Images[i].image)\n # print('error1: ', error1)\n # print('error2: ', error2)\n # print('error3: ', error3)\n # print('error4: ', error4, end=\"\\n\\n\")\n\n cv2.imshow(\"frame\", fm)\n print('Slope: ', slope)\n print('Angle: ', theta)\n print('Iteration: ', it)\n\n # binary = \"{0:b}\".format(theta)\n \n ser.write(str.encode(str(theta) + \"\\n\"))\n it = it + 1\n\n # connection.sendall( bytes(str(direction).encode('utf8')) )\n\ncv2.destroyAllWindows()", "sub_path": "VisionFollowing/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "serial.Serial", "line_number": 11, "usage_type": "call"}, {"api_name": 
"cv2.FONT_HERSHEY_SIMPLEX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.WINDOW_FULLSCREEN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 35, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 48, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 48, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "359045058", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 15 02:27:42 2021\r\n\r\n@author: Kayserend\r\n\"\"\"\r\n# importing button widget from kivy framework\r\nfrom kivy.uix.button import Button\r\nfrom kivy.app import App\r\nfrom kivy.core.window import Window\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.image import Image\r\nfrom kivy.core.audio import SoundLoader\r\nfrom kivy.clock import Clock\r\nfrom kivy.uix.label import Label\r\nfrom random import *\r\nimport time\r\nfrom android.permissions import request_permissions, Permission\r\n\r\nrequest_permissions([Permission.READ_EXTERNAL_STORAGE])\r\n \r\n# this is the main class which \r\n# will render the whole application\r\n\r\n\r\nclass firstApp(App):\r\n \r\n # method which will render our application\r\n def closeApp(self):\r\n # closing application\r\n App.get_running_app().stop()\r\n # removing window\r\n Window.close()\r\n \r\n def playSound(self):\r\n sound=SoundLoader.load(self.text)\r\n if sound:\r\n sound.volume=0.5\r\n sound.play()\r\n \r\n def playSoundRandom(self):\r\n a=randint(1,100)\r\n if a>=2:\r\n sound=SoundLoader.load(\"bruh.ogg\")\r\n else:\r\n sound=SoundLoader.load(\"test_sound.ogg\")\r\n if sound:\r\n sound.volume=0.5\r\n sound.play()\r\n \r\n \r\n def build(self):\r\n layout=GridLayout(cols=3)\r\n fotito=Image(source=\"./kawaii.jpg\")\r\n fotito2=Image(source=\"./kawaii.jpg\")\r\n #primer boton\r\n #name=\"test_sound.mp3\"\r\n btnsound=Button(text ='what.ogg',color =(0,0,0,0),background_normal='what.jpg')\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n #sound=SoundLoader.load(\"test_sound2.mp3\");\r\n btnsound.bind(on_press=firstApp.playSound)\r\n layout.add_widget(btnsound)\r\n #segundo boton\r\n btnsound=Button(text ='horse.ogg',color =(0,0,0,0),background_normal='horseamazing.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #tercer boton\r\n btnsound=Button(text ='door.ogg',color =(0,0,0,0),background_normal='doory.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #cuarto boton\r\n btnsound=Button(text ='bear.ogg',color =(0,0,0,0),background_normal='oso.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #quinto boton\r\n btnsound=Button(text ='dog.ogg',color 
=(0,0,0,0),background_normal='perro.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #sexto boton\r\n btnsound=Button(text ='scream.ogg',color =(0,0,0,0),background_normal='aaah.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #septimo boton\r\n btnsound=Button(text ='explosion.ogg',color =(0,0,0,0),background_normal='boom.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #octavo boton\r\n btnsound=Button(text ='aguila.ogg',color =(0,0,0,0),background_normal='pajarito.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #noveno boton\r\n btnsound=Button(text ='NONE',color =(0,0,0,0),background_normal='bruh.jpg')\r\n btnsound.bind(on_press=firstApp.playSoundRandom)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n \r\n clock=IncrediblyCrudeClock()\r\n Clock.schedule_interval(clock.update, 1)\r\n layout.add_widget(fotito)\r\n #este boton cierra la app\r\n btn=Button(text=\"Bye world\")\r\n btn.bind(on_press=firstApp.closeApp)\r\n layout.add_widget(btn)\r\n layout.add_widget(clock)\r\n return layout\r\n \r\nclass IncrediblyCrudeClock(Label):\r\n\tdef update(self,*args):\r\n\t\tself.text=time.asctime()\r\n\r\n# running the application\r\nfirstApp().run()\r\n", "sub_path": "very_useful_soundboard/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "android.permissions.request_permissions", "line_number": 20, "usage_type": "call"}, {"api_name": "android.permissions.Permission.READ_EXTERNAL_STORAGE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "android.permissions.Permission", "line_number": 20, "usage_type": "name"}, {"api_name": "kivy.app.App", "line_number": 26, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 31, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 31, "usage_type": "name"}, {"api_name": "kivy.core.window.Window.close", "line_number": 33, "usage_type": "call"}, {"api_name": "kivy.core.window.Window", "line_number": 33, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 36, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 36, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 44, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 44, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 46, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 46, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 53, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 54, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 55, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 58, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 64, "usage_type": "call"}, {"api_name": 
"kivy.uix.button.Button", "line_number": 69, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 74, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 79, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 84, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 89, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 94, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 99, "usage_type": "call"}, {"api_name": "kivy.clock.Clock.schedule_interval", "line_number": 105, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 105, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 108, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 114, "usage_type": "name"}, {"api_name": "time.asctime", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "629948420", "text": "import math as m\nfrom copy import deepcopy\nfrom time import time\n\nimport cv2\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport gcransac as gc\nimport ransac as rc\nfrom utils_helper import *\n\n\n''' homogr\n'adam', 'boat', 'Boston', 'BostonLib', 'BruggeSquare', 'BruggeTower', 'Brussels', 'CapitalRegion', \n'city', 'Eiffel', 'ExtremeZoom', 'graf', 'LePoint1', 'LePoint2', 'LePoint3', 'WhiteBoard'\n'''\nif __name__ == \"__main__\":\n dataset = 'adam'\n src_img, dst_img, gt_M, vpts = load_homogr_datasets(dataset)\n\n # 创建 ORB 特征提取器\n detetor = cv2.ORB_create(2000)\n # 提取 ORB 角点特征点 keypoints,特征点提取区域局部图像 descriptions\n keypoints1, descriptions1 = detetor.detectAndCompute(src_img, None)\n keypoints2, descriptions2 = detetor.detectAndCompute(dst_img, None)\n\n # BF 暴力匹配器\n bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)\n matches = bf.match(descriptions1, descriptions2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # 根据匹配结果构建点对\n src_pts = np.float32([keypoints1[m.queryIdx].pt for m in matches]).reshape(-1, 2)\n dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in matches]).reshape(-1, 2)\n # 获取图像长宽信息 \n h1, w1, _ = np.shape(src_img)\n h2, w2, _ = np.shape(dst_img)\n \n # 输出初始获取的暴力匹配结果\n print(f\"Detect {dataset} features\")\n print(f\"Features found in src image = {len(keypoints1)}\")\n print(f\"Features found in dst image = {len(keypoints2)}\")\n print(f\"Matches number = {len(matches)}\", '\\n')\n\n threshold = 1.0\n match_img_list = []\n H, mask = None, None\n for i in range(2):\n if i == 0:\n print('RANSAC')\n H, mask = rc.findHomography(src_pts, dst_pts, threshold=threshold, conf=0.99, max_iters=10000)\n else:\n print('GC-RANSAC')\n H, mask = gc.findHomography(src_pts, dst_pts, h1, w1, h2, w2, threshold=threshold, conf=0.99, max_iters=10000)\n print('Inliers Number = ', deepcopy(mask).astype(np.float32).sum())\n print('Error = ', getReprojectionError(vpts, H), '\\n')\n", "sub_path": "test_3_iterations.py", "file_name": "test_3_iterations.py", "file_ext": "py", "file_size_in_byte": 2167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.ORB_create", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.BFMatcher", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.NORM_L2", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 35, "usage_type": "call"}, {"api_name": "math.queryIdx", "line_number": 35, "usage_type": "attribute"}, 
{"api_name": "numpy.float32", "line_number": 36, "usage_type": "call"}, {"api_name": "math.trainIdx", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 39, "usage_type": "call"}, {"api_name": "ransac.findHomography", "line_number": 53, "usage_type": "call"}, {"api_name": "gcransac.findHomography", "line_number": 56, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 57, "usage_type": "attribute"}]} +{"seq_id": "167784842", "text": "#!/usr/bin/python3\n# coding: utf-8\nimport numpy as np\nimport pickle\nimport sys\nfrom skimage import color\nfrom skimage import io\nfrom skimage.transform import resize\nfrom fbRun import fbRun\nimport numpy as np\nfrom computeTextons import computeTextons\nfrom pathlib import Path\nfrom sklearn.ensemble import RandomForestClassifier\nimport time\nfrom sklearn.metrics import confusion_matrix\nfrom assignTextons import assignTextons\nimport cifar10 as cf\n\n\ndef fileExists(path):\n return Path(path).exists()\n\n\ndef toPickle(obj, name):\n pickle.dump(obj, open(name+'.pkl', \"wb\"))\n\n\ndef loadPickle(name):\n return pickle.load(open(name, \"rb\"))\n\ndef histc(X, bins):\n import numpy as np\n map_to_bins = np.digitize(X, bins)\n r = np.zeros(bins.shape)\n for i in map_to_bins:\n r[i-1] += 1\n return np.array(r)\n\n#evaluating in test data\nk = 100\nn = 10\n\nbestAlgPath = './data/bestAlgo.pkl'\n\nclf = loadPickle(bestAlgPath)\n\ntestTextonMapPath = './data/testTextonMap.pkl'\nif not fileExists(testTextonMapPath):\n print('Loading test images')\n testImgs = loadPickle('./data/testFilterResponses.pkl')\n print('Loading textons')\n textonPath = './data/mapAndTexton'+str(k)+'.pkl'\n textons = loadPickle(textonPath)['textons']\n print('Asigning textons to test images')\n textonMap = assignTextons(testImgs,textons.transpose())\n toPickle(textonMap,'./data/testTextonMap')\nelse:\n textonMap = loadPickle(testTextonMapPath)\n\nprint('Loading test labels')\ntestLabels = cf.load_cifar10('./cifar-10-batches-py/',mode='test')['labels']\n\n\nnTest = len(testLabels)\nrfPred = []\nprint('Evaluating on test set')\nfor t in range(nTest):\n print('\\r {:.2f}%'.format((t+1)*100/nTest),end='')\n img = textonMap[:,t*32:(t+1)*32]\n histo = histc(img.flatten(), np.arange(k))\n rfPred.append(clf.predict([histo])[0])\n\ntestCM = confusion_matrix(testLabels,rfPred)\n\ntoPickle(testCM,'./data/testConfusionMatrix')\nprint()\nprint('Test confusion matrix:')\nprint(testCM)\n\n\n\n\n\n", "sub_path": "05-Textons/code/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.digitize", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "assignTextons.assignTextons", "line_number": 55, "usage_type": "call"}, {"api_name": "cifar10.load_cifar10", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.confusion_matrix", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "467190076", "text": "# -*- coding:utf-8 -*-\nimport configparser\n\nimport pymongo\nimport time\n\nfrom src import config\nimport logging\n\nlogging.basicConfig(level=config.LOGGING_LEVEL,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\nclass HandleUserInDatabase(object):\n def __init__(self):\n cf = configparser.ConfigParser()\n cf.read('./database.ini', encoding='utf-8')\n address = cf.get('DATABASE', 'address')\n name = cf.get('DATABASE', 'name')\n myclient = pymongo.MongoClient(address)\n mydb = myclient[name]\n self.mycol = mydb[\"User\"]\n\n def save_data(self, uid, name):\n \"\"\"\n 保存uid\n :param uid:\n :return:\n \"\"\"\n # 检查是否有重复,这里调用前应该检查的,但是重复检查以防出错\n try:\n if len(self.mycol.find_one({\"uid\": uid})) > 0:\n logging.warning('repeat uid')\n return\n except:\n self.mycol.insert(\n {'uid': uid, 'name': name, 'followed_time': time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())})\n\n def find_data(self, uid):\n \"\"\"\n 查询是否有该uid\n :param uid:\n :return:\n \"\"\"\n if len(self.mycol.find_one({\"uid\": uid})) > 0:\n return True\n return False\n\n def get_total(self):\n \"\"\"\n 返回总共个数\n :return:\n \"\"\"\n return self.mycol.find().count()\n", "sub_path": "src/systemTools/HandleUserInDatabase.py", "file_name": "HandleUserInDatabase.py", "file_ext": "py", "file_size_in_byte": 1491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "src.config.LOGGING_LEVEL", "line_number": 10, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 10, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 16, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 33, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "108073961", "text": "import asyncio\nimport logging\nimport random\nimport collections\nimport itertools\nfrom datetime import datetime\nfrom discord.ext import commands, tasks\nfrom hourai.cogs import BaseCog\nfrom hourai.db.models import Username\nfrom sqlalchemy import func\nfrom sqlalchemy.exc import OperationalError\n\nMAX_STORED_USERNAMES = 20\n\n\nclass UsernameLogging(BaseCog):\n \"\"\" Cog for logging username changes. 
\"\"\"\n\n def __init__(self, bot):\n super().__init__()\n self.bot = bot\n # self.cleanup_username_histories.start()\n self.pending_ids = None\n self.offset = 0\n\n def cog_unload(self):\n # self.cleanup_username_histories.cancel()\n pass\n\n @commands.Cog.listener()\n async def on_user_update(self, before, after):\n if before.name == after.name:\n return\n assert before.id == after.id\n self.bot.loop.create_task(self.log_username_change(after))\n\n @commands.Cog.listener()\n async def on_message(self, msg):\n if msg.webhook_id is not None:\n return\n self.bot.loop.create_task(self.log_username_change(msg.author))\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n self.bot.loop.create_task(self.log_username_change(member))\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n self.bot.loop.create_task(self.log_username_change(member))\n\n @commands.Cog.listener()\n async def on_member_ban(self, guild, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @commands.Cog.listener()\n async def on_member_unban(self, guild, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @commands.Cog.listener()\n async def on_group_join(self, group, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @commands.Cog.listener()\n async def on_group_remove(self, group, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @tasks.loop(seconds=0.1)\n async def cleanup_username_histories(self):\n frame_size = 5000\n try:\n with self.bot.create_storage_session() as session:\n ids = session.query(Username.user_id) \\\n .group_by(Username.user_id) \\\n .offset(self.offset).limit(frame_size)\n\n ids = [x[0] for x in ids]\n\n if len(ids) <= 0:\n self.offset = 0\n return\n\n keys = lambda u: u.user_id\n\n usernames = session.query(Username) \\\n .filter(Username.user_id.in_(ids)) \\\n .all()\n usernames = list(usernames)\n usernames.sort(key=keys)\n for user_id, names in itertools.groupby(usernames, key=keys):\n self.merge_names(names, session)\n\n if len(session.deleted) > 0:\n self.log_changes(session)\n session.commit()\n else:\n self.offset += frame_size\n except Exception:\n self.bot.logger.exception('Exception while clearing histories:')\n\n @cleanup_username_histories.before_loop\n async def before_cleanup_username_histories(self):\n await self.bot.wait_until_ready()\n\n @commands.command()\n @commands.is_owner()\n async def refresh(self, ctx):\n async with ctx.typing():\n await asyncio.gather(*[self.log_username_change(user)\n for user in ctx.bot.users])\n await ctx.send(':thumbsup:')\n\n async def log_username_change(self, user):\n # Don't log system or webhook accounts\n if int(user.discriminator) == 0:\n return\n\n timestamp = datetime.utcnow()\n\n def create_username():\n return Username(user_id=user.id, name=user.name,\n timestamp=timestamp,\n discriminator=user.discriminator)\n logged = False\n backoff = 1\n while not logged:\n try:\n with self.bot.create_storage_session() as session:\n usernames = session.query(Username) \\\n .filter_by(user_id=user.id) \\\n .order_by(Username.timestamp.desc())\n usernames = list(usernames)\n if any(n.name == user.name for n in usernames):\n return\n username = create_username()\n usernames.append(username)\n filtered = self.merge_names(usernames, session)\n if username in filtered:\n session.add(username)\n self.log_changes(session)\n session.commit()\n logged = True\n except OperationalError:\n msg = f'OperationalError: Retrying in {backoff} seconds.'\n 
logging.error(msg)\n delta = (random.random() - 0.5) / 5\n await asyncio.sleep(backoff * (1 + delta))\n backoff *= 2\n if backoff >= 10:\n raise\n\n def log_changes(self, session):\n if len(session.deleted) > 0:\n output = '\\n'.join(f'Deleting: {str(n)}'\n for n in session.deleted)\n self.bot.logger.info(output)\n if len(session.dirty) > 0:\n output = '\\n'.join(f'Updating: {str(n)}'\n for n in session.dirty)\n self.bot.logger.info(output)\n if len(session.new) > 0:\n output = '\\n'.join(f'Adding: {str(n)}'\n for n in session.new)\n self.bot.logger.info(output)\n\n def merge_names(self, names, session):\n names = list(names)\n if len(names) <= 1:\n return names\n names.sort(key=lambda u: u.timestamp, reverse=True)\n changed = True\n removal = set()\n while changed:\n removal.clear()\n for i, after in enumerate(names[:-1]):\n for j, before in enumerate(names[i+1:]):\n if before.name != after.name or before is after:\n continue\n before.discriminator = (before.discriminator or\n after.discriminator)\n session.add(before)\n try:\n session.delete(after)\n except Exception:\n pass\n removal.add(i)\n break\n changed = len(removal) > 0\n names = [u for idx, u in enumerate(names)\n if idx not in removal]\n if len(names) > MAX_STORED_USERNAMES:\n # Assumes the ordering is maintained\n for name in names[MAX_STORED_USERNAMES:]:\n session.delete(name)\n names = names[:MAX_STORED_USERNAMES]\n return names\n", "sub_path": "hourai/extensions/logging/username_logging.py", "file_name": "username_logging.py", "file_ext": "py", "file_size_in_byte": 7080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "hourai.cogs.BaseCog", "line_number": 16, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 30, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 37, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 43, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 43, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 47, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 47, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 47, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 51, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 51, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 51, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 55, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 55, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 55, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 59, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 59, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 59, "usage_type": 
"name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 63, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 63, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 63, "usage_type": "name"}, {"api_name": "hourai.db.models.Username.user_id", "line_number": 72, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 72, "usage_type": "name"}, {"api_name": "hourai.db.models.Username.user_id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 73, "usage_type": "name"}, {"api_name": "hourai.db.models.Username", "line_number": 84, "usage_type": "argument"}, {"api_name": "hourai.db.models.Username.user_id.in_", "line_number": 85, "usage_type": "call"}, {"api_name": "hourai.db.models.Username.user_id", "line_number": 85, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 85, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 89, "usage_type": "call"}, {"api_name": "discord.ext.tasks.loop", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.ext.tasks", "line_number": 67, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 108, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 104, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 104, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 105, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 117, "usage_type": "name"}, {"api_name": "hourai.db.models.Username", "line_number": 120, "usage_type": "call"}, {"api_name": "hourai.db.models.Username", "line_number": 128, "usage_type": "argument"}, {"api_name": "hourai.db.models.Username.timestamp.desc", "line_number": 130, "usage_type": "call"}, {"api_name": "hourai.db.models.Username.timestamp", "line_number": 130, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 130, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 142, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 144, "usage_type": "call"}, {"api_name": "random.random", "line_number": 145, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "16773158", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\nfrom .models import items\nfrom .forms import itemForm, UserCreateForm, AuthenticateForm\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\ndef signup(request):\n if request.method == \"POST\":\n form = UserCreateForm(request.POST)\n if form.is_valid():\n new_user = User.objects.create_user(**form.cleaned_data)\n login(new_user)\n # redirect, or however you want to get to the main view\n return HttpResponseRedirect('item_new.html')\n else:\n form = UserCreateForm()\n\n return render(request, 'signup.html', {'form': form})\ndef index(request, auth_form=None, user_form=None):\n # User is logged in\n if request.user.is_authenticated():\n item = items.objects.all()\n user = request.user\n return render(request,\n 
'toDoApp/item_list.html',\n                      {'item': item, })\n    else:\n        # User is not logged in\n        auth_form = auth_form or AuthenticateForm()\n        user_form = user_form or UserCreateForm()\n\n        return render(request,\n                      'signup.html',\n                      {'auth_form': auth_form, 'user_form': user_form, })\n\ndef item_list(request):\n    item = items.objects.all()\n    return render(request, 'toDoApp/item_list.html',{'item':item})\n\ndef item_detail(request, pk):\n    items.objects.get(pk=pk)\n    item = get_object_or_404(items, pk=pk)\n    return render(request, 'toDoApp/item_detail.html', {'item':item})\n\ndef item_new(request):\n    if request.method == \"POST\":\n        form = itemForm(request.POST)\n        if form.is_valid():\n            item = form.save(commit=False)\n            item.save()\n            return redirect('item_detail', pk=item.pk)\n    else:\n        form = itemForm()\n    return render(request, 'toDoApp/item_edit.html', {'form':form})\n\ndef item_edit(request, pk):\n    item = get_object_or_404(items, pk=pk)\n    if request.method==\"POST\":\n        form = itemForm(request.POST, instance=item)\n        if form.is_valid():\n            item = form.save(commit=False)\n            item.save()\n            return redirect('item_detail', pk=item.pk)\n    else:\n        form = itemForm(instance=item)\n    return render(request, 'toDoApp/item_edit.html',{'form':form})\n\ndef login_view(request):\n    if request.method==\"POST\":\n        form = AuthenticateForm(data=request.POST)\n        if form.is_valid():\n            login(request, form.get_user())\n            #successful\n            return redirect('/')\n        else:\n            #failed\n            return index(request, auth_form=form)\n    return redirect('/')\n\ndef logout_view(request):\n    logout(request)\n    return redirect('/')\n", "sub_path": "toDoApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "forms.UserCreateForm", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.UserCreateForm", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "models.items.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "models.items.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.items", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "forms.AuthenticateForm", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.UserCreateForm", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "models.items.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "models.items.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.items", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "models.items.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "models.items.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.items", "line_number": 45, "usage_type": "name"}, {"api_name": 
"django.shortcuts.get_object_or_404", "line_number": 46, "usage_type": "call"}, {"api_name": "models.items", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "forms.itemForm", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "forms.itemForm", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "models.items", "line_number": 61, "usage_type": "argument"}, {"api_name": "forms.itemForm", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "forms.itemForm", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "forms.AuthenticateForm", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 85, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "206256201", "text": "from selenium import webdriver\nfrom models.addressbook_app import AddressBookApp\ndriver = webdriver.Chrome()\nwd = AddressBookApp(driver, \"http://localhost/addressbook/\")\nwd.login('admin', 'secret')\nwd.open_group_page()\nt = driver.find_elements_by_name(\"selected[]\")\nprint(t[1].text)\nprint(t[1].tag_name)\nprint(t[1].id)\nprint(t[2].get_attribute(\"value\"))\nwd.quit()", "sub_path": "test_script.py", "file_name": "test_script.py", "file_ext": "py", "file_size_in_byte": 363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 3, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 3, "usage_type": "name"}, {"api_name": "models.addressbook_app.AddressBookApp", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "194326552", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport time\nimport logging\n\nimport Chain\n\nimport BCDataStream\nimport deserialize\nimport util\nimport base58\nimport db\nfrom exception import DBException, MerkleRootMismatch  # MerkleRootMismatch assumed to live beside DBException\n\nLOG = logging.getLogger('DataStore')\n\n\nWORK_BITS = 304 # XXX more than necessary.\n\nNULL_PUBKEY_HASH = \"\\0\" * Chain.PUBKEY_HASH_LENGTH\nNULL_PUBKEY_ID = 0\nPUBKEY_ID_NETWORK_FEE = NULL_PUBKEY_ID\n\n# Size of the script and pubkey columns in bytes.\nMAX_SCRIPT = 1000000\nMAX_PUBKEY = 65\n\nSCRIPT_ADDRESS_RE = re.compile(\"\\x76\\xa9\\x14(.{20})\\x88\\xac\\x61?\\\\Z\", re.DOTALL)\nSCRIPT_PUBKEY_RE = re.compile(\n    \".((?<=\\x41)(?:.{65})|(?<=\\x21)(?:.{33}))\\xac\\\\Z\", re.DOTALL)\nSCRIPT_NETWORK_FEE = '\\x6a'\n\nclass DataStore(object):\n\n    def __init__(store, bytes):\n        store.db = db\n        store.commit_bytes = bytes #default bytes\n        store.bytes_since_commit = 0\n        store._blocks = {}\n        store.init_binfuncs()\n        store.init_chains()\n\n    def init_chains(store):\n        \n        store.chains_by = lambda: 0\n        store.chains_by.id = {}\n        store.chains_by.name = {}\n        store.chains_by.magic = {}\n        \n        chains = store.db.chain_get_all()\n        \n        for chain_id, magic, chain_name, chain_code3, address_version, script_addr_vers, \\\n            chain_policy, chain_decimals in chains:\n            \n            chain = Chain.create(\n                id = int(chain_id),\n                magic = store.binout(magic),\n                name = unicode(chain_name),\n                code3 = chain_code3 and unicode(chain_code3),\n                address_version = store.binout(address_version),\n                script_addr_vers = store.binout(script_addr_vers),\n                policy = unicode(chain_policy),\n                decimals = None if chain_decimals is None else int(chain_decimals))\n\n            store.chains_by.id[chain.id] = chain\n            store.chains_by.name[chain.name] = chain\n            store.chains_by.magic[bytes(chain.magic)] = chain\n        \n        \n    def import_block(store, b, chain=None):\n\n        tx_hash_array = []\n        all_txins_linked = True\n        \n        b['value_in'] = 0\n        b['value_out'] = 0\n        b['value_destroyed'] = 0\n\n\n        # Write the transaction data and accumulate value_in, value_out, value_destroyed\n        for pos in xrange(len(b['transactions'])):\n            tx = b['transactions'][pos]\n\n            if 'hash' not in tx:\n                tx['hash'] = chain.transaction_hash(tx['__data__'])\n\n            tx_hash_array.append(tx['hash'])\n            tx['tx_id'] = store.db.tx_find_id_and_value(tx, pos == 0)\n\n            if tx['tx_id']:\n                all_txins_linked = False\n            else:\n                tx['tx_id'] = store.import_tx(tx, pos == 0, chain)\n                if tx.get('unlinked_count', 1) > 0:\n                    all_txins_linked = False\n\n            if tx['value_in'] is None:\n                b['value_in'] = None\n            elif b['value_in'] is not None:\n                b['value_in'] += tx['value_in']\n            b['value_out'] += tx['value_out']\n            b['value_destroyed'] += tx['value_destroyed']\n\n\n        # Write the row for the block table\n        block_id = int(store.new_id(\"block\"))\n        b['block_id'] = block_id\n\n        if b['hashMerkleRoot'] != chain.merkle_root(tx_hash_array):\n            raise MerkleRootMismatch(b['hash'], tx_hash_array)\n\n        # Find the parent block\n        hashPrev = b['hashPrev']\n        is_genesis = hashPrev == chain.genesis_hash_prev\n\n        (prev_block_id, prev_height, prev_work, prev_satoshis,\n         prev_seconds, prev_ss, prev_total_ss, prev_nTime) = (\n            (None, -1, 0, 0, 0, 0, 0, b['nTime'])\n            if is_genesis else\n            store.db.find_prev(store.hashin(hashPrev)))\n\n        b['prev_block_id'] = prev_block_id\n        b['height'] = None if prev_height is None else prev_height + 1\n        b['chain_work'] = util.calculate_work(store.binout_int(prev_work), b['nBits'])\n\n        b['seconds'] = None if prev_seconds is None else (prev_seconds+b['nTime']-prev_nTime)\n        \n        if prev_satoshis is None or prev_satoshis < 0 or b['value_in'] is None:\n            b['satoshis'] = -1-b['value_destroyed']\n        
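# A negative total acts as a sentinel: the coin supply is unknown while\n        # some inputs remain unlinked; later code tests prev_satoshis < 0 for this.\n        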
else:\n            b['satoshis'] = prev_satoshis+b['value_out']-b['value_in']-b['value_destroyed']\n\n        if prev_satoshis is None or prev_satoshis < 0:\n            ss_created = None\n            b['total_ss'] = None\n        else:\n            ss_created = prev_satoshis * (b['nTime'] - prev_nTime)\n            b['total_ss'] = prev_total_ss + ss_created\n\n        if b['height'] is None or b['height'] < 2:\n            b['search_block_id'] = None\n        else:\n            b['search_block_id'] = store.get_block_id_at_height(\n                util.get_search_height(int(b['height'])),\n                None if prev_block_id is None else int(prev_block_id))\n\n        # Insert the block table row.\n        try:\n            bk = {\"block_id\" : block_id,\n                  \"height\": b['height'],\n                  \"bhash\": store.hashin(b['hash']),\n                  \"ntime\": store.intin(b['nTime']),\n                  \"mroot\": store.hashin(b['hashMerkleRoot']),\n                  \"version\": store.intin(b['version']),\n                  \"height\": b['height'],\n                  \"prev_block_id\": prev_block_id,\n                  \"chain_work\": store.binin_int(b['chain_work'], WORK_BITS),\n                  \"nbits\": store.intin(b['nBits']),\n                  \"nonce\": store.intin(b['nNonce']),\n                  \"value_in\": store.intin(b['value_in']),\n                  \"value_out\": store.intin(b['value_out']),\n                  \"satoshis\": store.intin(b['satoshis']),\n                  \"seconds\": store.intin(b['seconds']),\n                  \"total_ss\": store.intin(b['total_ss']),\n                  \"txs\": len(b['transactions']), \n                  \"search_id\": b['search_block_id']\n                  }\n            store.db.insert_block(bk)\n            \n        except DBException:\n            # Database error; re-raise\n            raise\n\n        # Write the block_tx rows\n        for tx_pos in xrange(len(b['transactions'])):\n            tx = b['transactions'][tx_pos]\n            store.db.insert_block_tx(block_id, tx, tx_pos)\n            LOG.info(\"block_tx %d %d\", block_id, tx['tx_id'])\n\n\n        # Fill in the remaining block and block_txin fields\n        if b['height'] is not None:\n            store.populate_block_txin(block_id)\n            if all_txins_linked or store.db.get_unlinked_txins(block_id) <= 0:\n                b['ss_destroyed'] = store.db.get_block_ss_destroyed(\n                    block_id, b['nTime'],\n                    map(lambda tx: tx['tx_id'], b['transactions']))\n                if ss_created is None or prev_ss is None:\n                    b['ss'] = None\n                else:\n                    b['ss'] = prev_ss + ss_created - b['ss_destroyed']\n                store.db.update_new_block(store.intin(b['ss']),store.intin(b['ss_destroyed']),block_id) \n            else:\n                b['ss_destroyed'] = None\n                b['ss'] = None\n\n        \n        # Record block_next, or orphan_block when the parent is unknown\n        if prev_block_id:\n            store.db.insert_block_next(prev_block_id, block_id)\n        elif not is_genesis:\n            store.db.insert_orphan_block(block_id, store.hashin(b['hashPrev']))\n\n        for row in store.db.get_orphan_block_id(store.hashin(b['hash'])):\n            (orphan_id,) = row\n            store.db.update_prev_block_id(block_id, orphan_id)\n            store.db.insert_block_next(block_id, orphan_id)\n            store.db.delete_orphan_block(orphan_id)\n        \n        # Handle the orphan-block case and offer the block to its chain\n        store.offer_block_to_chains(b, [chain.id])\n        return block_id\n\n\n    def import_tx(store, tx, is_coinbase, chain):\n        \n        tx_id = store.new_id(\"tx\")\n        dbhash = store.hashin(tx['hash'])\n        version = store.intin(tx['version'])\n        locktime = store.intin(tx['lockTime'])\n        \n        if 'size' not in tx:\n            tx['size'] = len(tx['__data__'])\n\n        store.db.insert_tx(tx_id, dbhash, version, locktime, tx['size'])    \n        \n        # Import the transaction outputs.\n        tx['value_out'] = 0\n        tx['value_destroyed'] = 0\n        for pos in xrange(len(tx['txOut'])):\n            txout = tx['txOut'][pos]\n            tx['value_out'] += txout['value']\n            txout_id = store.new_id(\"txout\")\n\n            pubkey_id = store.script_to_pubkey_id(chain, txout['scriptPubKey'])\n            if pubkey_id is not None and pubkey_id <= 0:\n                tx['value_destroyed'] += txout['value']\n\n\n            txout_value = store.intin(txout['value'])\n            scriptPubkey = store.binin(txout['scriptPubKey'])\n            store.db.insert_txout(txout_id, tx_id, pos, txout_value, scriptPubkey, pubkey_id)\n            \n            
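# Blocks can arrive out of order: spenders of this output may already be\n            # recorded in unlinked_txin, so link them to the new txout now.\n            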
for row in store.db.get_txin_by_txout_pos(dbhash, pos): \n                (txin_id,) = row\n                store.db.update_txin(txout_id, txin_id)\n                store.db.delete_unlinked_txin(txin_id)\n\n        # Import the transaction inputs.\n        tx['value_in'] = 0\n        tx['unlinked_count'] = 0\n        for pos in xrange(len(tx['txIn'])):\n            txin = tx['txIn'][pos]\n            txin_id = store.new_id(\"txin\")\n\n            if is_coinbase:\n                txout_id = None\n            else:\n                prevout_hash = store.hashin(txin['prevout_hash'])\n                txout_id, value = store.db.lookup_txout(prevout_hash, txin['prevout_n'])\n                if value is None:\n                    tx['value_in'] = None\n                elif tx['value_in'] is not None:\n                    tx['value_in'] += value\n\n\n            sequence = store.intin(txin['sequence'])\n            scriptSig = store.binin(txin['scriptSig'])    \n            store.db.insert_txin(txin_id, tx_id, pos, txout_id, scriptSig, sequence)\n            \n            if not is_coinbase and txout_id is None:\n                tx['unlinked_count'] += 1\n                prev_hash = store.hashin(txin['prevout_hash'])\n                prev_n = store.intin(txin['prevout_n'])\n                store.db.insert_unlinked_txin(txin_id, prev_hash, prev_n)\n        \n        return tx_id\n\n    def offer_block_to_chains(store, b, chain_ids):\n        b['top'] = store.adopt_orphans(b, 0, chain_ids, chain_ids)\n        for chain_id in chain_ids:\n            store._offer_block_to_chain(b, chain_id)\n    \n    def populate_block_txin(store, block_id):\n        \n        rows = store.db.get_block_txin(block_id)    \n        for row in rows:\n            (txin_id, oblock_id) = row\n            if store.is_descended_from(block_id, int(oblock_id)):\n                store.db.insert_block_txin(block_id, txin_id, oblock_id)\n        \n        \n    def is_descended_from(store, block_id, ancestor_id):\n        block = store.load_block(block_id)\n        ancestor = store.load_block(ancestor_id)\n        height = ancestor['height']\n        return block['height'] >= height and \\\n               store.get_block_id_at_height(height, block_id) == ancestor_id\n    \n    \n    def script_to_pubkey_id(store, chain, script):\n        script_type, data = chain.parse_txout_script(script)\n\n        if script_type in (Chain.SCRIPT_TYPE_ADDRESS, Chain.SCRIPT_TYPE_P2SH):\n            return store.pubkey_hash_to_id(data)\n\n        if script_type == Chain.SCRIPT_TYPE_PUBKEY:\n            return store.pubkey_to_id(chain, data)\n\n        if script_type == Chain.SCRIPT_TYPE_MULTISIG:\n            script_hash = chain.script_hash(script)\n            multisig_id = store._pubkey_id(script_hash, script)\n\n            if not store.selectrow(\"SELECT 1 FROM multisig_pubkey WHERE multisig_id = ?\", (multisig_id,)):\n                for pubkey in set(data['pubkeys']):\n                    pubkey_id = store.pubkey_to_id(chain, pubkey)\n                    store.sql(\"\"\"\n                        INSERT INTO multisig_pubkey (multisig_id, pubkey_id)\n                        VALUES (?, ?)\"\"\", (multisig_id, pubkey_id))\n            return multisig_id\n\n        if script_type == Chain.SCRIPT_TYPE_BURN:\n            return PUBKEY_ID_NETWORK_FEE\n\n        return None\n\n    def pubkey_hash_to_id(store, pubkey_hash):\n        return store._pubkey_id(pubkey_hash, None)\n    \n    def pubkey_to_id(store, chain, pubkey):\n        pubkey_hash = chain.pubkey_hash(pubkey)\n        return store._pubkey_id(pubkey_hash, pubkey)\n\n    \n    def _pubkey_id(store, pubkey_hash, pubkey):\n        dbhash = store.binin(pubkey_hash)\n        pubkey_id = store.db.get_pubkey_id(dbhash)\n        if pubkey_id:\n            return pubkey_id\n        else:\n            pubkey_id = store.new_id(\"pubkey\")\n            if pubkey is not None and len(pubkey) > MAX_PUBKEY:\n                pubkey = None\n            store.db.insert_pubkey(pubkey_id, dbhash, store.binin(pubkey))\n            return pubkey_id\n    \n    \n    \n    def adopt_orphans(store, b, orphan_work, chain_ids, chain_mask):\n\n        ret = [None]\n        def receive(x):\n            ret[0] = x\n        def doit():\n            store._adopt_orphans_1(stack)\n        stack = [receive, chain_mask, chain_ids, orphan_work, b, doit]\n        while stack:\n            stack.pop()()\n        return ret[0]\n    \n    def _adopt_orphans_1(store, stack):\n        def doit():\n            
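# Continuation-passing style with an explicit work stack: long orphan\n            # chains are walked iteratively, so deep recursion cannot overflow.\n            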
store._adopt_orphans_1(stack)\n        def continuation(x):\n            store._adopt_orphans_2(stack, x)\n        def didit():\n            ret = stack.pop()\n            stack.pop()(ret)\n\n        b = stack.pop()\n        orphan_work = stack.pop()\n        chain_ids = stack.pop()\n        chain_mask = stack.pop()\n        ret = {}\n        stack += [ ret, didit ]\n\n        block_id = b['block_id']\n        # Height for the next block\n        height = None if b['height'] is None else int(b['height'] + 1)\n\n        for chain_id in chain_ids:\n            ret[chain_id] = (b, orphan_work)\n        \n        # For each known successor block\n        for row in store.db.get_next_block(block_id):\n            \n            next_id, nBits, value_out, value_in, nTime, satoshis = row\n            nBits = int(nBits)\n            nTime = int(nTime)\n            satoshis = None if satoshis is None else int(satoshis)\n            new_work = util.calculate_work(orphan_work, nBits)\n\n            if b['chain_work'] is None:\n                chain_work = None\n            else:\n                chain_work = b['chain_work'] + new_work - orphan_work\n\n            if value_in is None:\n                value, count1, count2 = store.db.get_block_tx_info(next_id)\n                if count1 == count2 + 1:\n                    value_in = int(value)\n                else:\n                    LOG.debug(\n                        \"not updating block %d value_in: %s != %s + 1\",\n                        next_id, repr(count1), repr(count2))\n            else:\n                value_in = int(value_in)\n            generated = None if value_in is None else int(value_out - value_in)\n\n            if b['seconds'] is None:\n                seconds = None\n                total_ss = None\n            else:\n                new_seconds = nTime - b['nTime']\n                seconds = b['seconds'] + new_seconds\n                if b['total_ss'] is None or b['satoshis'] is None:\n                    total_ss = None\n                else:\n                    total_ss = b['total_ss'] + new_seconds * b['satoshis']\n\n            if satoshis < 0 and b['satoshis'] is not None and \\\n               b['satoshis'] >= 0 and generated is not None:\n                satoshis += 1 + b['satoshis'] + generated\n\n            if height is None or height < 2:\n                search_block_id = None\n            else:\n                search_block_id = store.get_block_id_at_height(util.get_search_height(height), int(block_id))\n\n            \n            store.db.update_block(height, store.binin_int(chain_work, WORK_BITS),\n                      store.intin(value_in),\n                      store.intin(seconds), store.intin(satoshis),\n                      store.intin(total_ss), search_block_id,\n                      next_id)\n\n            ss = None\n\n            if height is not None:\n                \n                store.db.update_candidate(height, next_id)\n                store.populate_block_txin(int(next_id))\n\n                if b['ss'] is None or store.db.get_unlinked_txins(next_id):\n                    pass\n                else:\n                    tx_ids = map(lambda row: row[0], store.db.get_block_txids(next_id))\n                    destroyed = store.db.get_block_ss_destroyed(next_id, nTime, tx_ids)\n                    ss = b['ss'] + b['satoshis'] * (nTime - b['nTime']) - destroyed\n                    \n                    store.db.update_block_ss(store.intin(ss),store.intin(destroyed),next_id)\n            \n            nb = {\n                \"block_id\": next_id,\n                \"height\": height,\n                \"chain_work\": chain_work,\n                \"nTime\": nTime,\n                \"seconds\": seconds,\n                \"satoshis\": satoshis,\n                \"total_ss\": total_ss,\n                \"ss\": ss}\n\n            stack += [ret, continuation,\n                      chain_mask, None, new_work, nb, doit]\n    \n    \n    def _adopt_orphans_2(store, stack, next_ret):\n        ret = stack.pop()\n        for chain_id in ret.keys():\n            pair = next_ret[chain_id]\n            if pair and pair[1] > ret[chain_id][1]:\n                ret[chain_id] = pair\n\n\n    def _offer_block_to_chain(store, b, chain_id):\n        if b['chain_work'] is None:\n            in_longest = 0\n        else:\n            top = b['top'][chain_id][0]\n            row = store.db.get_block_by_chain(chain_id)\n            \n            if row:\n                loser_id, loser_height, loser_work = row\n                if loser_id != top['block_id'] and \\\n                        store.binout_int(loser_work) >= top['chain_work']:\n                    row = None\n            if row:\n                # New longest chain.\n                in_longest = 1\n                to_connect = []\n                to_disconnect = []\n                winner_id = top['block_id']\n                winner_height = top['height']\n                while loser_height > winner_height:\n                    to_disconnect.insert(0, loser_id)\n                    loser_id = 
store.db.get_prev_block_id(loser_id)\n                    loser_height -= 1\n                while winner_height > loser_height:\n                    to_connect.insert(0, winner_id)\n                    winner_id = store.db.get_prev_block_id(winner_id)\n                    winner_height -= 1\n                loser_height = None\n                while loser_id != winner_id:\n                    to_disconnect.insert(0, loser_id)\n                    loser_id = store.db.get_prev_block_id(loser_id)\n                    to_connect.insert(0, winner_id)\n                    winner_id = store.db.get_prev_block_id(winner_id)\n                    winner_height -= 1\n                for block_id in to_disconnect:\n                    store.db.disconnect_block(block_id, chain_id)\n                for block_id in to_connect:\n                    store.db.connect_block(block_id, chain_id)\n\n            elif b['hashPrev'] == store.chains_by.id[chain_id].genesis_hash_prev:\n                in_longest = 1 # Assume only one genesis block per chain. XXX\n            else:\n                in_longest = 0\n\n        store.db.insert_candidate(chain_id, b,in_longest)\n        if in_longest > 0:\n            store.db.update_chain(top['block_id'], chain_id)\n\n\n\n    def offer_existing_block(store, hash, chain_id):\n        block_row = store.db.get_block_by_hash(store.hashin(hash))\n        if not block_row:\n            return False\n        \n        b = {\n            \"block_id\": block_row[0],\n            \"height\": block_row[1],\n            \"chain_work\": store.binout_int(block_row[2]),\n            \"nTime\": block_row[3],\n            \"seconds\": block_row[4],\n            \"satoshis\": block_row[5],\n            \"ss\": block_row[6],\n            \"total_ss\": block_row[7]}\n\n        if store.db.exist_candidate(b['block_id'], chain_id):\n            LOG.info(\"block %d already in chain %d\", b['block_id'], chain_id)\n        else:\n            if b['height'] == 0:\n                b['hashPrev'] = store.chains_by.id[chain_id].genesis_hash_prev\n            else:\n                b['hashPrev'] = 'dummy' # Fool adopt_orphans.\n            store.offer_block_to_chains(b, [chain_id, ])\n        return True\n\n\n    def init_binfuncs(store):\n        store.binin = util.identity\n        store.binin_hex = util.from_hex\n        store.binin_int = util.binin_int\n        store.binout = util.identity\n        store.binout_hex = util.to_hex\n        store.binout_int = util.binout_int\n        store.intin = util.identity\n        store.hashin = util.rev\n        store.hashin_hex = util.from_hex\n        store.hashout = util.rev\n        store.hashout_hex = util.to_hex\n    \n    \n    def get_block_id_at_height(store, height, descendant_id):\n        if height is None:\n            return None\n        while True:\n            block = store.load_block(descendant_id)\n            if block['height'] == height:\n                return descendant_id\n            descendant_id = block[\n                'search_id'\n                if util.get_search_height(block['height']) >= height else\n                'prev_id']\n\n    def cache_block(store, block_id, height, prev_id, search_id):\n        assert isinstance(block_id, int), block_id\n        assert isinstance(height, int), height\n        assert prev_id is None or isinstance(prev_id, int)\n        assert search_id is None or isinstance(search_id, int)\n        block = {\n            'height': height,\n            'prev_id': prev_id,\n            'search_id': search_id}\n        store._blocks[block_id] = block\n        return block\n\n    def load_block(store, block_id):\n        block = store._blocks.get(block_id)\n        if block is None:\n            \n            row = store.db.get_block_id_by_height(block_id)\n            if row is None:\n                return None\n            height, prev_id, search_id = row\n            block = store.cache_block(\n                block_id, int(height),\n                None if prev_id is None else int(prev_id),\n                None if search_id is None else int(search_id))\n        return block\n    \n    \n    def new_id(store, key):\n        return store.db.new_id(key)\n    \n    \n    def flush(store):\n        if store.bytes_since_commit > 0:\n            store.db.commit()\n            LOG.debug(\"commit\")\n            store.bytes_since_commit = 0\n\n    def rollback(store):\n        store.db.rollback()\n    \n    def commit(store):\n        store.db.commit()\n    \n    def get_block_number(store):\n        return store.db.get_block_height()\n    ", "sub_path": "bitloader/loader/Datastore.py", "file_name": 
"Datastore.py", "file_ext": "py", "file_size_in_byte": 22792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "Chain.PUBKEY_HASH_LENGTH", "line_number": 22, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 30, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 30, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Chain.create", "line_number": 57, "usage_type": "call"}, {"api_name": "util.calculate_work", "line_number": 126, "usage_type": "call"}, {"api_name": "util.get_search_height", "line_number": 146, "usage_type": "call"}, {"api_name": "exception.DBException", "line_number": 172, "usage_type": "name"}, {"api_name": "Chain.SCRIPT_TYPE_ADDRESS", "line_number": 306, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_P2SH", "line_number": 306, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_PUBKEY", "line_number": 309, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_MULTISIG", "line_number": 312, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_BURN", "line_number": 324, "usage_type": "attribute"}, {"api_name": "util.calculate_work", "line_number": 393, "usage_type": "call"}, {"api_name": "util.get_search_height", "line_number": 430, "usage_type": "call"}, {"api_name": "util.identity", "line_number": 554, "usage_type": "attribute"}, {"api_name": "util.from_hex", "line_number": 555, "usage_type": "attribute"}, {"api_name": "util.binin_int", "line_number": 556, "usage_type": "attribute"}, {"api_name": "util.identity", "line_number": 557, "usage_type": "attribute"}, {"api_name": "util.to_hex", "line_number": 558, "usage_type": "attribute"}, {"api_name": "util.binout_int", "line_number": 559, "usage_type": "attribute"}, {"api_name": "util.identity", "line_number": 560, "usage_type": "attribute"}, {"api_name": "util.rev", "line_number": 561, "usage_type": "attribute"}, {"api_name": "util.from_hex", "line_number": 562, "usage_type": "attribute"}, {"api_name": "util.rev", "line_number": 563, "usage_type": "attribute"}, {"api_name": "util.to_hex", "line_number": 564, "usage_type": "attribute"}, {"api_name": "util.get_search_height", "line_number": 576, "usage_type": "call"}]} +{"seq_id": "48195774", "text": "from random import *\nimport pygame\nfrom pygame.locals import *\nimport time\nimport sys\n\npygame.init()\n\npygame.display.set_caption('OSD2 Tetrix')\n\nrows, cols = 20, 10\narea = [[0 for col in range(cols)] for row in range(rows)]\nscreen = pygame.display.set_mode((cols*30 ,rows*30 + 10),0,32) # +250\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nbackground.fill((10, 10, 10))\nspeed = 0.5\n\n# 블럭들\ntetrominoes = [0, 0, 0, 0, 0, 0, 0]\n# I : 막대, cyan 컬러\ntetrominoes[0]=[[\n [1,1,1,1]],\n\n [\n [1],\n [1],\n [1],\n [1]]]\n#colors[0]=0x00FFFF\n# T : ㅗ, purple 컬러\ntetrominoes[1]=[[\n [1,1,1],\n [0,1,0]],\n\n [\n [0,1],\n [1,1],\n [0,1]],\n\n [\n [0,1,0],\n [1,1,1]],\n\n [\n [1,0],\n [1,1],\n [1,0]]]\n#colors[1]=0x767676\n# L : ㄱ회전, orange 컬러\ntetrominoes[2]=[[\n [1,1,1],\n [1,0,0]],\n\n [\n [1,1],\n [0,1],\n [0,1]],\n\n [\n [0,0,1],\n [1,1,1]],\n\n [\n [1,0],\n [1,0],\n [1,1]]]\n#colors[2]=0xFFA500\n# J : ㄴ, blue 컬러\ntetrominoes[3]=[[\n [1,0,0],\n [1,1,1]],\n\n [\n [1,1],\n [1,0],\n [1,0]],\n\n [\n 
[1,1,1],\n    [0,0,1]],\n\n    [\n    [0,1],\n    [0,1],\n    [1,1]]]\n#colors[3]=0x0000FF\n# Z : z, red color\ntetrominoes[4]=[[\n    [1,1,0],\n    [0,1,1]],\n\n    [\n    [0,1],\n    [1,1],\n    [1,0]]]\n#colors[4]=0xFF0000\n# S : lightning, green color\ntetrominoes[5]=[[\n    [0,1,1],\n    [1,1,0]],\n\n    [\n    [1,0],\n    [1,1],\n    [0,1]]]\n#colors[5]=0x00FF00\n# O : square, yellow color\ntetrominoes[6]=[[\n    [1,1],\n    [1,1]]]\n#colors[6]=0xFFFF00\n\ndef RawEnd(blocknum, blockstate) :\n    return len(tetrominoes[blocknum][blockstate])\n\ndef ColEnd(blocknum, blockstate) : \n    end = 0\n    for row in range(len(tetrominoes[blocknum][blockstate])) :\n        for col in range(len(tetrominoes[blocknum][blockstate][row])) : \n            if tetrominoes[blocknum][blockstate][row][col] == 1 :\n                if end < col : \n                    end = col\n    return end\n\ndef DrawBlock() :\n    screen.lock()\n    for col in range(cols) :\n        for row in range(rows) :\n            if area[row][col] >= 1 :\n                pygame.draw.rect(screen, (255,220,143), Rect((col*30,row*30), (27, 27)))\n    pygame.display.update()\n    screen.unlock()\n\ndef CleanUp() :\n    screen.lock()\n    screen.fill((10,10,10))\n    pygame.display.update()\n    screen.unlock()\n\ndef InsertAreaBlock(num) :\n    tet = tetrominoes[num][0]\n    tetrow = len(tetrominoes[num][0])\n    tetcol = len(tetrominoes[num][0][0])\n    row = 0\n\n    while (tetrow > 0) :\n        for col in range(tetcol) : \n            area[0 + row][3 + col] = area[0 + row][3 + col] + tet[row][col]\n        tetrow = tetrow - 1\n        row = row + 1\n\ndef DownBlock(blocklocation, blocknum, blockstate) :\n    tet = tetrominoes[blocknum][blockstate]\n    tetcol = len(tetrominoes[blocknum][blockstate][0])\n    tetlen = len(tet)\n    row = 0\n    x = blocklocation[0]\n    y = blocklocation[1]\n\n    if (x + tetlen == 20) :\n        return False\n\n    for col in range(tetcol) : \n        if (x + tetlen < 20 and tet[tetlen - 1][col] > 0) :\n            if (area[x + tetlen][y + col] > 0) :\n                return False\n\n    while (tetlen > 0) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] - tet[row][col]\n        tetlen = tetlen - 1\n        row = row + 1\n\n    tetlen = len(tet)\n    row = 0\n    while (tetlen > 0) :\n        for col in range(tetcol) : \n            area[x + 1 + row][y + col] = area[x + 1 + row][y + col] + tet[row][col]\n        tetlen = tetlen - 1\n        row = row + 1\n\n    return True\n\ndef CheckHorizon(blocknum, blocklocation) :\n    for col in range(10) :\n        for row in range(4) :\n            if (area[row][col] > 1) :\n                return False\n\n    return True\n\ndef Rotation(blocklocation, blocknum, blockstate) :\n    rotatelen = len(tetrominoes[blocknum])\n    tetcol = len(tetrominoes[blocknum][blockstate][0])\n    x = blocklocation[0]\n    y = blocklocation[1]\n\n    blockstate2 = blockstate\n    if (blockstate2 + 1 == rotatelen) :\n        blockstate2 = 0\n    else :\n        blockstate2 += 1\n\n    tet = tetrominoes[blocknum][blockstate]\n    tetlen = len(tet)\n\n    for row in range(tetlen) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] - tet[row][col]\n\n    tet = tetrominoes[blocknum][blockstate2]\n    tetcol = len(tetrominoes[blocknum][blockstate2][0])\n    tetlen = len(tet)\n\n    for row in range(tetlen) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] + tet[row][col]\n\n    return blockstate2\n\ndef Move(blocklocation, blocknum, blockstate, way) :\n    rotatelen = len(tetrominoes[blocknum])\n    tetcol = len(tetrominoes[blocknum][blockstate][0])\n    x = blocklocation[0]\n    y = blocklocation[1]\n\n    tet = tetrominoes[blocknum][blockstate]\n    tetlen = len(tet)\n    row = 0\n\n    while (tetlen > 0) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] - tet[row][col]\n        tetlen = tetlen - 1\n        row = row + 1\n\n    tet = tetrominoes[blocknum][blockstate]\n    tetlen = len(tet)\n    row = 0\n\n    
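# Second pass: stamp the piece back in, shifted sideways by way columns\n    # (way = -1 moves left, way = +1 moves right).\n    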
while (tetlen > 0) :\n        for col in range(tetcol) : \n            area[x + row][y + col + way] = area[x + row][y + col + way] + tet[row][col]\n        tetlen = tetlen - 1\n        row = row + 1\n\ndef Lineall() :\n    check = 0\n    row2 = 0\n\n    for row in range(20) :\n        for col in range(10) :\n            row2 = 19 - row\n            if (area[row2][col] == 1) :\n                check += 1\n            else :\n                break\n        if check == 10 :\n            return row2\n        else :\n            check = 0\n\n    return 0\n\ndef DownAll(lineall) :\n    area2 = [r[:] for r in area]\n    row2 = 0\n\n    for row in range(rows) :\n        print(area[row])\n    #print(\"area\")\n\n    for row in range(rows) :\n        print(area2[row])\n    #print(\"area2\")\n\n    for col in range(10) : \n        area[lineall][col] = 0\n\n    for row in range(lineall) : \n        for col in range(10) : \n            row2 = lineall - row\n            if row2 == 0 :\n                break\n            area[row2][col] = area2[row2 - 1][col]\n\n    for row in range(rows) :\n        print(area[row])\n    #print(\"downall\")\n\n\ndef Run() : \n    gameover = False\n    noncollision = False\n    while not gameover :\n        for event in pygame.event.get() :\n            speed_up = 1\n            spacecheck = 0\n\n            if (noncollision == False) :\n                while Lineall() != 0 : \n                    lineall = Lineall()\n                    DownAll(lineall)\n\n                blocknum = randint(0, 6)\n                noncollision = True\n                InsertAreaBlock(blocknum)\n                blocklocation = [0, 3]\n                blockstate = 0\n\n                if not CheckHorizon(blocknum, blocklocation) :\n                    noncollision = False\n                    gameover = True\n                    break\n\n                CleanUp()\n                DrawBlock()\n\n            if event.type == pygame.QUIT :\n                pygame.quit()\n                sys.exit()\n            if event.type == pygame.KEYDOWN :\n                if event.key == K_UP :\n                    blockstate = Rotation(blocklocation, blocknum, blockstate)\n                    CleanUp()\n                    DrawBlock()\n                elif event.key == K_RIGHT :\n                    if (blocklocation[1] != 10 - ColEnd(blocknum, blockstate)) : \n                        temp = blockstate\n                        blockstate = Move(blocklocation, blocknum, blockstate, 1)\n                        blocklocation[1] += 1\n                        blockstate = temp\n                        CleanUp()\n                        DrawBlock()\n                elif event.key == K_LEFT :\n                    if blocklocation[1] > 0 :\n                        temp = blockstate\n                        blockstate = Move(blocklocation, blocknum, blockstate, -1)\n                        blocklocation[1] -= 1\n                        blockstate = temp\n                        CleanUp()\n                        DrawBlock()\n                elif event.key == K_DOWN :\n                    speed_up = 10\n                elif event.key == K_SPACE : \n                    downboolean2 = DownBlock(blocklocation, blocknum, blockstate)\n                    blocklocation[0] += 1\n                    while (downboolean2) :\n                        downboolean2 = DownBlock(blocklocation, blocknum, blockstate)\n                        blocklocation[0] += 1\n                        if (blocklocation[0] == 20 - RawEnd(blocknum, blockstate)) : \n                            break\n                    spacecheck = 1\n                    CleanUp()\n                    DrawBlock()\n\n            if spacecheck == 0 :\n                downboolean = DownBlock(blocklocation, blocknum, blockstate)\n                if downboolean :\n                    blocklocation[0] += 1\n                elif not downboolean :\n                    noncollision = False\n\n            time.sleep(float(0.1)/speed/speed_up * 3)\n\n    #for row in range(rows) :\n    #    print(area[row])\n    #print(\"Cut\")\n\n    #if not hasattr(event, 'key') : \n    #    continue\n\nRun()\n\npygame.quit()", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 9779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 131, "usage_type": "call"}, {"api_name": 
"pygame.draw", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 297, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 320, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 321, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 322, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 323, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 365, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 376, "usage_type": "call"}]} +{"seq_id": "171747183", "text": "import sys\nimport base64\n\n\ndef encrypt(msg, key):\n '''\n Encrypt message using one time pad key\n input: msg - message to be encrypted\n key - one time pad key\n output: password\n '''\n while len(key) < len(msg):\n # Increase the length of the key\n diff = len(msg) - len(key)\n key += key[:diff]\n\n # Get the ascii representations of the message and the key\n amsg = list(map(lambda x: ord(x), list(msg)))\n akey = list(map(lambda x: ord(x), list(key[:len(msg)])))\n # XOR the message and the key\n xor = list(map(lambda x, y: x ^ y, amsg, akey))\n # Transform ascii encrypted message into string\n pwd = ''.join(list(map(lambda x: chr(x), xor)))\n # Encode password inn base64\n pwd = base64.b64encode(pwd.encode())\n\n return pwd.decode()\n\notp = \"Never send a human to do a machine's job\"\n\nprint(encrypt(sys.argv[1], otp))\n", "sub_path": "Semester 2/Information Security and Privacy/Homework/Hw2/get_pass.py", "file_name": "get_pass.py", "file_ext": "py", "file_size_in_byte": 888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "base64.b64encode", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "65698623", "text": "from flask import Flask, render_template, Response\r\nimport argparse\r\nfrom flask import url_for\r\nfrom flask import request\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-d\",\"--data\", required = True,\r\n help = \"Data to send\")\r\nargs = vars(ap.parse_args())\r\ndata=args['data']\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return data\r\n\r\n\r\n\t\r\n\r\ndef shutdown_server():\r\n func = request.environ.get('werkzeug.server.shutdown')\r\n if func is None:\r\n raise RuntimeError('Not running with the Werkzeug Server')\r\n func()\r\n\r\n@app.route('/shutdown', methods=['GET','POST'])\r\ndef shutdown():\r\n shutdown_server()\r\n return 'Server shutting down...'\r\n\t\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', threaded=True)\r\n", "sub_path": "face_recogition/flask_for_cart.py", "file_name": "flask_for_cart.py", "file_ext": "py", "file_size_in_byte": 759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.environ.get", "line_number": 23, 
"usage_type": "call"}, {"api_name": "flask.request.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "303130924", "text": "__author__ = 'HXiao'\nfrom rpy2 import robjects as ro\n\n\n\nimport callingR as cr\n\nro.r(\"source('~/Documents/workspace/workSpace/Rscrpits/fundRisk.R')\")\n#ro.r(\"source('~/RScripts/RRiskManagement.R')\")\n\ninnercode = [102000298,102001873]\n#innerType = 1\n\n#cr.RiskManagement.innittt()\nes = cr.RiskManagement.es(innercode)\nprint(es)\n\n#ro.r(\"q()\")\n#sadf n\n", "sub_path": "testR.py", "file_name": "testR.py", "file_ext": "py", "file_size_in_byte": 346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rpy2.robjects.r", "line_number": 8, "usage_type": "call"}, {"api_name": "rpy2.robjects", "line_number": 8, "usage_type": "name"}, {"api_name": "callingR.RiskManagement.es", "line_number": 15, "usage_type": "call"}, {"api_name": "callingR.RiskManagement", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "71677587", "text": "#!/usr/bin/env python\nfrom __future__ import division\nimport sys\nimport os\nimport json\nimport re\nimport copy\nfrom yaml import load, dump\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nclass Parser():\n TYPE_DIMENSION = 'dimension'\n TYPE_DIMENSION_GROUP = 'dimension_group'\n TYPE_MEASURE = 'measure'\n TYPE_FILTER = 'filter'\n\n DEFAULT_TIMEFRAMES = ['date', 'day_of_month', 'day_of_week', 'day_of_week_index', 'day_of_year', 'hour', 'hour_of_day', 'minute', 'month', 'month_name', 'month_num', 'quarter', 'quarter_of_year', 'time', 'time_of_day', 'week', 'week_of_year', 'year' ]\n\n @staticmethod\n def isRawReference(field):\n return re.match('^ *\\$\\{TABLE\\}.[a-z_0-9]+ *$', field, re.MULTILINE)\n\n def __init__(self, stream):\n self.writeStream = stream\n\n @staticmethod\n def isDimensionGroup(field):\n if Parser.TYPE_DIMENSION_GROUP in field:\n return True\n if Parser.TYPE_DIMENSION in field and ('type' in field and field['type'] == 'time'):\n return True\n return False\n\n def fire(self,field,type,field_orig=None):\n if type in [Parser.TYPE_DIMENSION,Parser.TYPE_DIMENSION_GROUP] and 'sql' in field:\n # if raw reference, then remove a bunch of stuff\n if Parser.isRawReference(field['sql']):\n # remove absolute reference\n field_orig.pop('type', None)\n field_orig.pop('sql', None)\n\n if type in [Parser.TYPE_MEASURE] and 'sql' in field:\n if re.search('[ ]*(\\$\\{TABLE\\}.[a-z_0-9]+)[ ]*', field['sql']):\n field_orig['sql'] = re.sub('\\$\\{TABLE\\}.([a-z_0-9]+)',r'${\\1}',field['sql'])\n\n def extractGroup(self,field):\n set = []\n name = field['dimension_group'] if Parser.TYPE_DIMENSION_GROUP in field else field['dimension']\n if 'timeframes' in field:\n for dim in field['timeframes']:\n set.append(name+'_'+dim)\n else:\n # default timeframes\n for dim in Parser.DEFAULT_TIMEFRAMES:\n set.append(name+'_'+dim)\n return set\n\n def dump(self,tree):\n dump(tree, self.writeStream, default_flow_style=False)\n\n\n#\n # Parser that handles sets\n #\nclass baseViewParser(Parser):\n\n def __init__(self, stream):\n self.writeStream = stream\n self.fields = []\n\n def fire(self,field,type,field_orig=None):\n if type in ['dimension','dimension_group'] and 'sql' in field:\n name = field[type]\n # re.search('[ ]*(\\$\\{TABLE\\}.[a-z_]+)[ ]*', field['sql']):\n if field['sql'].startswith('${TABLE}.') and 
Parser.isRawReference(field['sql']):\n new_field = {}\n new_field[type] = name # e.g. new_field['dimension'] = name\n if 'type' in field:\n new_field['type'] = field['type']\n if 'sql' in field:\n new_field['sql'] = field['sql']\n else:\n new_field['sql'] = '${TABLE}.'+name\n self.fields.append(new_field)\n\n def dump(self,tree):\n new_tree = []\n for directive in tree:\n new_tree.append({\n 'view': {}\n , 'fields': self.fields\n }) # tree is blank at the beginning\n\n dump(new_tree, self.writeStream, default_flow_style=False)\n\n#\n # Parser that handles sets\n #\nclass setParser(Parser):\n sets = {\n 'Clickstream': 1\n , 'Sponsorships': 1\n , 'User Activity': 1\n , 'Consumer': 1\n , 'Email': 1\n }\n\n DIMENSION_ALL_TYPE = 1\n DIMENSION_GROUP_ALL_TYPE = 2\n\n def __init__(self, stream):\n self.writeStream = stream\n\n self.hiddenFields = {\n 'dimension': []\n , 'dimension_group': []\n }\n\n self.resulting_set = {\n 'dimensions': {}\n , 'measures': {}\n , 'all': {\n 'dimension': {}\n , 'dimension_group': {}\n , 'filter': {}\n , 'measure': {}\n }\n }\n\n for key in setParser.sets.keys():\n self.resulting_set['dimensions'][key] = {}\n self.resulting_set['measures'][key] = {}\n\n def fire(self,field,type,field_orig=None):\n name = field[type]\n if type in [Parser.TYPE_DIMENSION,Parser.TYPE_DIMENSION_GROUP,Parser.TYPE_MEASURE]: #,Parser.TYPE_FILTER]:\n if Parser.isDimensionGroup(field):\n dimensions = self.extractGroup(field)\n for dim in dimensions:\n self.resulting_set['all'][Parser.TYPE_DIMENSION][dim] = 2\n else:\n self.resulting_set['all'][type][name] = 1\n\n # hide dimensions and measures\n if type in [Parser.TYPE_DIMENSION]:\n self.hiddenFields['dimension'].append(name)\n if type in [Parser.TYPE_DIMENSION_GROUP]:\n self.hiddenFields['dimension_group'].append(name)\n\n matching_category = self.isFilterMatch(field) or self.isLabelMatch(field)\n if matching_category:\n if Parser.isDimensionGroup(field):\n dimensions = self.extractGroup(field)\n for dim in dimensions:\n self.resulting_set['dimensions'][matching_category][dim] = 2 # since it is part of a group\n elif type == Parser.TYPE_DIMENSION:\n self.resulting_set['dimensions'][matching_category][field[type]] = 1\n elif type == Parser.TYPE_MEASURE:\n self.resulting_set['measures'][matching_category][field[type]] = 1\n # # now add all the filter dependencies\n if 'filters' in field:\n for filter_field in field['filters'].keys():\n self.resulting_set['measures'][matching_category][filter_field] = 1\n\n\n\n #\n # returns set name if match, otherwise None\n #\n def isFilterMatch(self,field):\n #TODO\n return None\n\n #\n # returns set name if match, otherwise None\n #\n def isLabelMatch(self,field):\n if 'view_label' in field:\n for category in self.sets.keys():\n if category in field['view_label']:\n return category\n return None\n\n def dump(self,tree):\n self.writeStream.write('\\tsets:\\n')\n # for category in self.sets.keys():\n # lookmlcatname = category.lower().replace(\" \", \"\")\n # self.writeStream.write('\\t\\tdim_'+lookmlcatname+':\\n')\n # for field in self.resulting_set['dimensions'][category].keys():\n # self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n # self.writeStream.write('\\t\\tmes_'+lookmlcatname+':\\n')\n # for field in self.resulting_set['measures'][category].keys():\n # self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n # self.writeStream.write('\\t\\t'+lookmlcatname+':\\n')\n # self.writeStream.write('\\t\\t\\t- dim_'+lookmlcatname+'*\\n')\n # self.writeStream.write('\\t\\t\\t- mes_'+lookmlcatname+'*\\n')\n\n # 
self.writeStream.write('\\t\\tall_dim:\\n')\n        # for field in self.resulting_set['all']['dimension'].keys():\n        #     self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n        self.writeStream.write('\\t\\tall_measures:\\n')\n        for field in self.resulting_set['all']['measure'].keys():\n            self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n        # for field in self.resulting_set['all']['dimension_group']:\n        #     self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n        for field in self.resulting_set['all']['filter'].keys():\n            self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n\n        self.writeStream.write('\\tfields:\\n')\n\n        wrote_groups = {}\n\n        for field in self.hiddenFields['dimension']:\n            self.writeStream.write('\\t- dimension: '+field+'\\n\\t\\thidden: true\\n')\n        for field in self.hiddenFields['dimension_group']:\n            self.writeStream.write('\\t- dimension_group: '+field+'\\n\\t\\thidden: true\\n')\n\n\n\ndef processElement(listeners,element):\n    if isinstance(element, list):\n        for el in element:\n            processElement(listeners,el)\n    elif isinstance(element, dict):\n        element_copy = copy.deepcopy(element)\n        for key in element.keys():\n            if key in ['dimension','dimension_group','filter','measure']:\n                for listener in listeners:\n                    listener.fire(element_copy,key,element)\n                break\n            else:\n                processElement(listeners,element[key])\n\n\ndef processView(stream,listeners):\n    file_tree = load(stream, Loader=Loader)\n    processElement(listeners,file_tree)\n    for listener in listeners:\n        listener.dump(file_tree)\n    stream.close()\n\n#\n # Outputs 3 files:\n #  - raw data model (which has only {TABLE}. references, types)\n #  - a new copy of the original LookML file with all {TABLE} references replaced with {} names\n #  - a set file based on filter matches\n #/\ndef main():\n    INPUT_FILE = ''\n    OUTPUT_DIRECTORY = ''\n    if len(sys.argv) >= 3:\n        INPUT_FILE = sys.argv[1]\n        OUTPUT_DIRECTORY = sys.argv[2]\n    else:\n        print(\"./set_builder.py INPUT_LOOKML_FILE OUTPUT_DIRECTORY\")\n        sys.exit()\n\n    if OUTPUT_DIRECTORY.find('/',len(OUTPUT_DIRECTORY)-1) == -1:\n        OUTPUT_DIRECTORY = OUTPUT_DIRECTORY+'/'\n\n    file_path = os.path.realpath(INPUT_FILE)\n    stream = open(file_path, 'r')\n\n    raw_schema_file = os.path.join(OUTPUT_DIRECTORY,'raw_schema.lookml')\n    modified_file = os.path.join(OUTPUT_DIRECTORY,'mod_file.lookml')\n    set_file = os.path.join(OUTPUT_DIRECTORY,'set_file.lookml')\n\n    # start writing out the output for these files immediately\n    listeners = []\n    with open(raw_schema_file,'w') as raw_f:\n        with open(modified_file,'w') as mod_f:\n            with open(set_file,'w') as set_f:\n                listeners.append(baseViewParser(raw_f))\n                listeners.append(Parser(mod_f))\n                listeners.append(setParser(set_f))\n                processView(stream,listeners)\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "set_builder.py", "file_name": "set_builder.py", "file_ext": "py", "file_size_in_byte": 9204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.match", "line_number": 25, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 63, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 98, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 229, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 240, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 240, 
"usage_type": "name"}, {"api_name": "sys.argv", "line_number": 255, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 256, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 257, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}]} +{"seq_id": "596614256", "text": "\"\"\"\n.. module MultiCropAndOpenFace\n :synopsis: Script to apply cropping and OpenFace to all videos in a directory.\n\n\"\"\"\n\nimport glob\nimport json\nimport os\nimport subprocess\nimport sys\nimport numpy as np\n\nsys.path.append('/home/gvelchuru/')\nfrom OpenFaceScripts.runners import CropAndOpenFace\n\n\ndef make_vids(path):\n \"\"\"\n Return list of vids not processed yet given a path\n :param path: Path to video directory\n :type path: str\n :return: list of vids to do\n \"\"\"\n folder_components = set(os.path.join(path, x) for x in os.listdir(path))\n return [x for x in glob.glob(os.path.join(path, '*.avi')) if (\n os.path.splitext(x)[0] + '_cropped' not in folder_components or 'au.txt' not in os.listdir(\n os.path.join(path, os.path.splitext(x)[0] + '_cropped')))]\n\n\ndef make_crop_and_nose_files(path):\n crop_file = os.path.join(path, 'crop_files_list.txt')\n nose_file = os.path.join(path, 'nose_files_list.txt')\n\n if not os.path.exists(crop_file):\n crop_path = sys.argv[sys.argv.index('-c') + 1]\n crop_txt_files = CropAndOpenFace.find_txt_files(crop_path)\n json.dump(crop_txt_files, open(crop_file, mode='w'))\n\n if not os.path.exists(nose_file):\n nose_path = sys.argv[sys.argv.index('-n') + 1]\n nose_txt_files = CropAndOpenFace.find_txt_files(nose_path)\n json.dump(nose_txt_files, open(nose_file, mode='w'))\n\n return json.load(open(crop_file)), json.load(open(nose_file))\n\n\nif __name__ == '__main__':\n\n path = sys.argv[sys.argv.index('-id') + 1]\n\n vids = make_vids(path)\n num_GPUs = 2\n processes = []\n indices = np.linspace(0, len(vids), num=num_GPUs + 1)\n for index in range(len(indices) - 1):\n processes.append(subprocess.Popen(\n ['python3', '/home/gvelchuru/OpenFaceScripts/helpers/HalfCropper.py', '-id', path, '-vl',\n str(int(indices[index])), '-vr',\n str(int(indices[index + 1]))],\n env={'CUDA_VISIBLE_DEVICES': '{0}'.format(str(index))}))\n [p.wait() for p in processes]\n", "sub_path": "runners/MultiCropAndOpenFace.py", "file_name": "MultiCropAndOpenFace.py", "file_ext": "py", "file_size_in_byte": 2041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 36, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace.find_txt_files", "line_number": 37, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace", "line_number": 37, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 41, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace.find_txt_files", "line_number": 42, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace", "line_number": 42, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 43, "usage_type": "call"}, {"api_name": "json.load", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 55, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "430582480", "text": "from django import forms\nfrom crispy_forms.bootstrap import TabHolder, Tab\nfrom common_data.forms import BootstrapMixin\nfrom django.contrib.auth import authenticate\nfrom crispy_forms.helper import FormHelper\n\nfrom crispy_forms.layout import (Row, \n Column, \n Fieldset,\n Submit, \n Div,\n Layout,\n HTML)\nfrom . 
import models\nfrom employees.models import Employee\nfrom django_select2.forms import Select2Widget\n\nclass ServiceForm(forms.ModelForm,BootstrapMixin):\n category = forms.ModelChoiceField(models.ServiceCategory.objects.all(), required=False)\n\n class Meta:\n fields = \"__all__\"\n model = models.Service\n\n widgets = {\n 'description':forms.Textarea(attrs={'rows':4, 'cols':15}),\n 'procedure': Select2Widget(attrs={'data-width': '20rem'})\n } \n \n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n 'name',\n 'description',\n Row(\n Column('flat_fee', css_class='form-group col-6'),\n Column('hourly_rate', css_class='form-group col-6'),\n ),\n Row(\n Column('category', css_class='form-group col-4'),\n Column('procedure', css_class='form-group col-4'),\n Column('frequency', css_class='form-group col-4'),\n ),\n 'is_listed',\n Div(Submit('submit', 'Submit'), css_class=\"floating-submit\")\n )\nclass ServiceCategoryForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n fields = \"__all__\"\n model = models.ServiceCategory\n\n\nclass ServicePersonForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n fields = \"__all__\"\n model = models.ServicePerson\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit('submit', 'Submit'))\nclass ServicePersonUpdateForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n exclude = \"employee\",\n model = models.ServicePerson\n\n\nclass ServiceTeamForm(forms.ModelForm, BootstrapMixin):\n #create members in react\n class Meta:\n exclude = \"members\",\n model = models.ServiceTeam\n widgets = {\n \"description\": forms.Textarea(attrs={\"rows\": 4})\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column(\n 'Team Creation Form',\n 'name',\n 'description',\n 'manager',\n css_class=\"col-6\"),\n Column(\n HTML(\n \"\"\"\n

Select Service People:

\n
\n \"\"\"\n ), css_class=\"col-6\")\n )\n )\n self.helper.add_input(Submit('submit', 'Submit')) \n\nclass ServiceWorkOrderForm(forms.ModelForm, BootstrapMixin):\n #create service people in react\n status = forms.CharField(widget=forms.HiddenInput)\n works_request = forms.ModelChoiceField(\n models.WorkOrderRequest.objects.all(),\n widget=forms.HiddenInput)\n class Meta:\n fields = ['date', 'time', 'expected_duration', 'team', 'status', 'description', 'works_request' ]\n model = models.ServiceWorkOrder\n widgets = {\n 'description': forms.Textarea(attrs={'rows': 4})\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n TabHolder(\n Tab('Form',\n Row(\n Column('date', css_class=\"form group col-6\"),\n Column('time', css_class=\"form group col-6\"),\n ),\n 'works_request',\n 'description',\n 'completed',\n 'expected_duration',\n 'status',\n 'authorized_by',\n 'team',\n 'progress',\n ),\n Tab('Service People',\n HTML(\n \"\"\"\n
\n \"\"\"\n ),\n ),\n )\n )\n self.helper.add_input(Submit('submit', 'Submit'))\n \nclass ServiceWorkOrderCompleteForm(forms.ModelForm, BootstrapMixin):\n progress = forms.CharField(widget=forms.HiddenInput, required=False)\n service_time = forms.CharField(widget=forms.HiddenInput, required=False)\n class Meta:\n fields = [\"progress\"]\n model = models.ServiceWorkOrder\n \n\nclass ServiceWorkOrderAuthorizationForm(BootstrapMixin, forms.Form):\n '''Authorization handled in the functional view work_order_authorize'''\n \n authorized_by = forms.ModelChoiceField(Employee.objects.filter(serviceperson__isnull=False))\n password = forms.CharField(widget=forms.PasswordInput)\n status = forms.ChoiceField(choices=models.ServiceWorkOrder.STATUS_CHOICES)\n\n def clean(self, *args, **kwargs):\n cleaned_data = super().clean(*args, **kwargs)\n password = cleaned_data['password']\n employee = cleaned_data['authorized_by']\n\n if not authenticate(username=employee.user.username, password=password):\n raise forms.ValidationError('The password supplied is incorrect.')\n\n return cleaned_data\n\nclass EquipmentRequisitionForm(forms.ModelForm, BootstrapMixin):\n equipment = forms.CharField(widget=forms.HiddenInput)\n class Meta:\n exclude = \"authorized_by\", \"released_by\", 'received_by', 'returned_date'\n model = models.EquipmentRequisition\n widgets = {\n 'work_order': Select2Widget(attrs={'data-width': '20rem'})\n }\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('date','equipment' , css_class=\"col-sm-6\"), \n Column('work_order', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n \n )\n self.helper.add_input(Submit('submit', 'Submit'))\n \n\n\nclass WorkOrderEquipmentRequisitionForm(forms.ModelForm, BootstrapMixin):\n work_order = forms.ModelChoiceField(models.ServiceWorkOrder.objects.all(), \n widget=forms.HiddenInput)\n equipment = forms.CharField(widget=forms.HiddenInput)\n \n class Meta:\n exclude = \"authorized_by\", \"released_by\", 'received_by', 'returned_date'\n model = models.EquipmentRequisition\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('work_order', 'equipment', css_class=\"col-sm-12\"), css_class=\"form-row\"),\n Row(\n Column('date', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-12\"),\n css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n \n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\n \n\nclass ConsumablesRequisitionForm(forms.ModelForm, BootstrapMixin):\n consumables = forms.CharField(widget=forms.HiddenInput)\n \n class Meta:\n exclude = \"authorized_by\", \"released_by\",\n model = models.ConsumablesRequisition\n widgets = {\n 'work_order': Select2Widget(attrs={'data-width': '20rem'})\n }\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('date', 'consumables', css_class=\"col-sm-6\"), \n Column('work_order', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n \n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass WorkOrderConsumablesRequisitionForm(forms.ModelForm, BootstrapMixin):\n work_order = forms.ModelChoiceField(models.ServiceWorkOrder.objects.all(), \n widget=forms.HiddenInput)\n consumables = forms.CharField(widget=forms.HiddenInput)\n \n class Meta:\n exclude = \"authorized_by\", \"released_by\",\n model = models.ConsumablesRequisition\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('work_order', 'consumables',css_class=\"col-sm-12\"), css_class=\"form-row\"),\n Row(\n Column('date', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-12\"),\n css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\nclass ServiceProcedureForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n exclude = \"required_equipment\", \"required_consumables\"\n model = models.ServiceProcedure\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n TabHolder(\n Tab('Procedure Details',\n 'name',\n 'reference',\n 'author',\n 'description',\n ),\n Tab('procedure steps',\n HTML(\n \"\"\"\n
\n
\n \"\"\"\n )\n ),\n Tab('Select Equipment And Consumables',\n HTML(\n \"\"\"\n
\n \"\"\"\n )\n ),\n )\n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass EquipmentReturnForm(BootstrapMixin, forms.Form):\n received_by = forms.ModelChoiceField(Employee.objects.filter(inventorycontroller__isnull=False))\n password = forms.CharField(widget=forms.PasswordInput)\n return_date = forms.DateField()\n requisition = forms.ModelChoiceField(models.EquipmentRequisition.objects.all(), widget=forms.HiddenInput)\n\n def clean(self):\n cleaned_data = super().clean()\n usr = authenticate(\n username=cleaned_data['received_by'].user.username,\n password=cleaned_data['password'])\n\n if not usr:\n raise forms.ValidationError(\n 'The Inventory Controller password is incorrect.')\n \n requisition = cleaned_data['requisition']\n requisition.received_by = cleaned_data['received_by']\n requisition.returned_date = cleaned_data['return_date']\n requisition.save()\n return cleaned_data\n\nclass WorkOrderRequestForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n fields = 'created', 'created_by','description', 'service', 'status'\n model = models.WorkOrderRequest\n widgets = {\n 'description': forms.Textarea(attrs={'rows': 4})\n }\n\n ", "sub_path": "services/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 13340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.ModelForm", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django_select2.forms.Select2Widget", "line_number": 27, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 33, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 34, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 37, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 41, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Div", "line_number": 47, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 47, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 49, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 49, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 55, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 55, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 62, "usage_type": "call"}, 
{"api_name": "crispy_forms.layout.Submit", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 64, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 64, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 70, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 76, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 76, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 81, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 82, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 83, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 84, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 90, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 91, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 99, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 101, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 101, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 103, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 103, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 104, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 104, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 106, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 111, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 111, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 116, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 117, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.TabHolder", "line_number": 118, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 119, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 120, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 121, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 122, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 133, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 134, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 142, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 144, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 144, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 144, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 145, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 145, 
"usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.forms.CharField", "line_number": 146, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 146, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 146, "usage_type": "attribute"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 152, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 152, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 155, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 155, "usage_type": "name"}, {"api_name": "employees.models.Employee.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "employees.models.Employee.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "employees.models.Employee", "line_number": 155, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 156, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 156, "usage_type": "name"}, {"api_name": "django.forms.PasswordInput", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.forms.ChoiceField", "line_number": 157, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 157, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 164, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 165, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 165, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 169, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 169, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 169, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 170, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 170, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 170, "usage_type": "attribute"}, {"api_name": "django_select2.forms.Select2Widget", "line_number": 175, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 182, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 183, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 184, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 185, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 186, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 187, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 188, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 189, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 191, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 192, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 193, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 194, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 197, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 201, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 201, "usage_type": 
"name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 201, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 202, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 202, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 203, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 203, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 204, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 204, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 204, "usage_type": "attribute"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 213, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 214, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 215, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 216, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 217, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 218, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 219, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 220, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 221, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 222, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 224, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 225, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 227, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 230, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 234, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 234, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 234, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 235, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 235, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 235, "usage_type": "attribute"}, {"api_name": "django_select2.forms.Select2Widget", "line_number": 241, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 248, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 249, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 250, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 251, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 252, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 253, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 254, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 255, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 257, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 258, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 259, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 260, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 263, "usage_type": 
"call"}, {"api_name": "django.forms.ModelForm", "line_number": 266, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 266, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 266, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 267, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 267, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 268, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 268, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 269, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 269, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 269, "usage_type": "attribute"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 278, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 279, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 280, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 281, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 282, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 283, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 284, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 285, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 286, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 287, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 289, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 290, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 292, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 294, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 296, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 296, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 296, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 303, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 304, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.TabHolder", "line_number": 305, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 306, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 312, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 313, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 320, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 321, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 329, "usage_type": "call"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 332, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 332, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 332, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 333, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 333, "usage_type": "name"}, {"api_name": "employees.models.Employee.objects.filter", "line_number": 333, "usage_type": 
"call"}, {"api_name": "employees.models.Employee.objects", "line_number": 333, "usage_type": "attribute"}, {"api_name": "employees.models.Employee", "line_number": 333, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 334, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 334, "usage_type": "name"}, {"api_name": "django.forms.PasswordInput", "line_number": 334, "usage_type": "attribute"}, {"api_name": "django.forms.DateField", "line_number": 335, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 335, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 336, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 336, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 336, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 340, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 345, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 345, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 354, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 354, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 354, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 359, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 359, "usage_type": "name"}]} +{"seq_id": "18779576", "text": "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom Utils.Tools import success, error,successResult,successResultList,errorResult\nfrom .models import Question\nfrom Worker.models import Worker\nfrom Respondent.models import RespondentMain,RespondentAnswer\nimport os\nimport json\nfrom Utils import Tools\n\n\n'''\n\n\n#查询未结束的问卷\ndef surveyNotEnd(request):\n if request.method == 'POST':\n m=SurveyService()\n lt=m.querySurveyAllList(searchkey='',end=False)\n relist=list()\n if lt != None and len(lt) > 0:\n for item in lt:\n sub=dict()\n sub['surveyid']=item.strSurveyId\n sub['title']=item.strTitle\n sub['max']=str(item.iMax)\n sub['createtime']=item.strCreateTime\n sub['endtime']=item.strEndTime\n sub['remark']=item.strRemark\n sub['filepath']=item.strFilePath\n relist.append(sub)\n if len(relist) > 0:\n re=successResultList(relist)\n return JsonResponse(re)\n else:\n re=errorResult('',-8)\n return JsonResponse(re)\n else:\n return JsonResponse(errorResult('错误的请求方式'))\n\n\n\n#下载问卷文件\ndef downFile(request):\n sid=request.GET.get('sid')\n if sid == None or sid=='':\n pass\n m=SurveyService()\n\n ob=m.getSurvey(sid)\n if ob == None or ob.strFilePath == None or ob.strFilePath == '':\n pass\n filepath_=ob.strFilePath\n\n\n def readFile(fn, buf_size=262144):#大文件下载,设定缓存大小\n f = open(fn, \"rb\")\n while True:#循环读取\n c = f.read(buf_size)\n if c:\n yield c\n else:\n break\n f.close()\n\n response = JsonResponse(readFile(filepath_), content_type='APPLICATION/OCTET-STREAM') #设定文件头,这种设定可以让任意文件都能正确下载,而且已知文本文件不是本地打开\n response['Content-Disposition'] = 'attachment; filename='+sid.encode('utf-8') + sid.encode('utf-8')#设定传输给客户端的文件名称\n response['Content-Length'] = os.path.getsize(filepath_)#传输给客户端的文件大小\n return response\n'''\n\n\n#终端登录\n@csrf_exempt\ndef mobileLogin(request):\n if request.method == 'POST':\n strJson=request.POST.get('strJson','')\n try:\n data=json.loads(strJson)\n except:\n return 
JsonResponse(errorResult('请求数据解析失败'))\n        username=data.get('username','')\n        pwd2=data.get('pwd','')\n        ob=Worker.objects.filter(strSNO=username)\n        if ob == None or len(ob) == 0:\n            return JsonResponse(errorResult('用户名密码错误1'))\n        else:\n            bean=ob[0]\n            if bean.strPwd==pwd2:\n                return JsonResponse(successResult(bean.strSNO))\n            else:\n                return JsonResponse(errorResult('用户名密码错误2'))\n    else:\n        return JsonResponse(errorResult('错误的请求方式'))\n\n\n\n#查询问题\n@csrf_exempt\ndef queryQuestion(request):\n    if request.method == 'POST':\n        strJson=request.POST.get('strJson','')\n        try:\n            timestr=json.loads(strJson)\n        except:\n            return JsonResponse(errorResult('请求数据解析失败'))\n\n        tmp=Question.objects.filter(strTime__gte=timestr.get('time',''))\n        re=list()\n        if tmp !=None and len(tmp)>0:\n            for item in tmp:\n                sub=dict()\n                sub['qid']=item.strQId\n                sub['qno']=item.strQNo\n                sub['question']=item.strQuestion\n                sub['img']=item.strImg\n                sub['type']=item.iType\n                sub['a1']=item.strA1\n                sub['a2']=item.strA2\n                sub['a3']=item.strA3\n                sub['a4']=item.strA4\n                sub['a5']=item.strA5\n\n                re.append(sub)\n            return JsonResponse(successResultList(re))\n    else:\n        return JsonResponse(errorResult('错误的请求方式'))\n\n\n\n#上传回访信息(单人信息和答案)\n@csrf_exempt\ndef upRespondent(request):\n    if request.method == 'POST':\n        strJson=request.POST.get('strJson','')\n        try:\n            data=json.loads(strJson)\n            person=data.get('person')\n            alist=data.get('list')\n        except:\n            return JsonResponse(errorResult('请求数据解析失败'))\n\n        #受访人信息\n        if person != None:\n            bean=RespondentMain()\n            bean.strRid=person.get('rid','')\n            bean.strName=person.get('name','')\n            bean.iAge=person.get('age',0)\n\n            sex=person.get('sex',0)\n            if sex == 1 :\n                bean.strSex='男'\n            else:\n                bean.strSex='女'\n            bean.strPhone=person.get('phone','')\n            bean.strBorn=person.get('born','')\n            bean.strMZ=person.get('mz','')\n\n            bean.strStudyLv=person.get('studylv','')\n            bean.strJob=person.get('job','')\n            bean.strArea=person.get('area','')\n            bean.strAddress=person.get('address','')\n            bean.strMoney=person.get('money','')\n            bean.strKISH=person.get('kish','')\n            bean.strReceptTime=person.get('uptime','')\n            bean.strSick=person.get('sick','')\n            bean.iSickYear=person.get('year',0)\n            bean.iLocal=person.get('local',0)\n            bean.strSNO=person.get('sno','')\n            bean.strSName=person.get('sname','')\n            bean.strHealth=person.get('health','')\n            bean.save()\n\n\n        #答案\n        if alist != None and len(alist) > 0:\n            dblist=list()\n            for item in alist:\n                sub=RespondentAnswer()\n                sub.strId=Tools.CreateUUID()\n                sub.strRid=item.get('rid','')\n                sub.strQId=item.get('qid','')\n                sub.strItem=item.get('itemid','')\n                dblist.append(sub)\n            RespondentAnswer.objects.bulk_create(dblist)\n        return JsonResponse(successResult('操作成功'))\n    else:\n        return JsonResponse(errorResult('错误的请求方式'))\n\n", "sub_path": "Question/mobileview.py", "file_name": "mobileview.py", "file_ext": "py", "file_size_in_byte": 6267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.loads", "line_number": 80, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 82, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 82, "usage_type": "call"}, {"api_name": "Worker.models.Worker.objects.filter", "line_number": 85, "usage_type": "call"}, {"api_name": "Worker.models.Worker.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "Worker.models.Worker", "line_number": 85, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 87, "usage_type": "call"}, {"api_name": 
"Utils.Tools.errorResult", "line_number": 87, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "Utils.Tools.successResult", "line_number": 91, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 93, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 95, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 95, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 75, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 105, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 107, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Question.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 109, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 126, "usage_type": "call"}, {"api_name": "Utils.Tools.successResultList", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 128, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 128, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 100, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 138, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 142, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 142, "usage_type": "call"}, {"api_name": "Respondent.models.RespondentMain", "line_number": 146, "usage_type": "call"}, {"api_name": "Respondent.models.RespondentAnswer", "line_number": 180, "usage_type": "call"}, {"api_name": "Utils.Tools.CreateUUID", "line_number": 181, "usage_type": "call"}, {"api_name": "Utils.Tools", "line_number": 181, "usage_type": "name"}, {"api_name": "Respondent.models.RespondentAnswer.objects.bulk_create", "line_number": 186, "usage_type": "call"}, {"api_name": "Respondent.models.RespondentAnswer.objects", "line_number": 186, "usage_type": "attribute"}, {"api_name": "Respondent.models.RespondentAnswer", "line_number": 186, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 187, "usage_type": "call"}, {"api_name": "Utils.Tools.successResult", "line_number": 187, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 189, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 189, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "577308025", "text": "#! 
/usr/bin/python\n\nimport warnings\nimport wikipedia\nimport datetime\nfrom translate import Translator\n\nLEADING_MESSAGE = \"ducky: \"\n\ndef wikipedia_ref(phrase):\n return wikipedia.summary(phrase)\n\nwhile True:\n user_input = input(\"ducky>>>\").strip()\n if user_input == \"date\":\n print(\"{} Da date is: {}\".format(LEADING_MESSAGE, datetime.date.today()))\n \n elif \"wiki\" in user_input:\n try:\n replaced_user_input = user_input.replace(\"wiki\", \"\")\n print(\"{} Here is your article: {}\".format(LEADING_MESSAGE, wikipedia_ref(replaced_user_input))) \n \n except wikipedia.exceptions.DisambiguationError:\n print(\"{} That is not a wikipedia page, here are some suggestions: {}\".format(LEADING_MESSAGE, wikipedia.search(replaced_user_input, suggestion=True))) \n \n elif \"tra\" in user_input:\n replaced_user_input = user_input.replace(\"tra\", \"\")\n list_of_args = replaced_user_input.split()\n arg_1 = list_of_args[0]\n arg_2 = list_of_args[1]\n translator = Translator(to_lang=arg_2)\n translation = translator.translate(arg_1)\n print(\"{} Translation: {} to {}\".format(LEADING_MESSAGE, arg_1, translation))\n \n elif \"bye\" in user_input:\n break \n", "sub_path": "ducky/ducky.py", "file_name": "ducky.py", "file_ext": "py", "file_size_in_byte": 1283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "wikipedia.summary", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 16, "usage_type": "attribute"}, {"api_name": "wikipedia.exceptions", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wikipedia.search", "line_number": 24, "usage_type": "call"}, {"api_name": "translate.Translator", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "457395364", "text": "# coding: utf-8\n\nimport os\nfrom selenium import webdriver\nimport unittest\nimport time,sys\nimport re\nimport HTMLTestRunner\n\n\nclass Baidu(unittest.TestCase):\n\n def setUp(self):\n # 添加 driver\n self.path = r\"C:\\Users\\lenovo\\AppData\\Local\\Google\\Chrome\\Application\\chromedriver.exe\"\n self.driver = webdriver.Chrome(executable_path=self.path)\n self.url = \"http://map.baidu.com\"\n\n # 增加 截图 函数\n def add_img(self):\n self.imgs.append(self.driver.get_screenshot_as_base64())\n return True\n\n def test_map_search(self):\n self.driver.get(self.url)\n self.driver.find_element_by_id(\"sole-input\").send_keys(u\"淮海路思南路口\")\n self.driver.find_element_by_id(\"search-button\").click()\n time.sleep(2)\n self.add_img() # 截图 调用 之前定义的函数\n return \"map search is done!\"\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n suit = unittest.TestSuite()\n suit.addTest(Baidu(\"test_map_search\"))\n\n runner = unittest.TextTestRunner()\n runner.run(suit)\n", "sub_path": "selenium_python自动化实战(练习)/htmltestrunner/python3/[推荐]输出_截图版/示例/Testcase/baidu/Sub_map.py", "file_name": "Sub_map.py", "file_ext": "py", "file_size_in_byte": 1103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 37, "usage_type": 
"call"}, {"api_name": "unittest.TextTestRunner", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "276649895", "text": "from django.shortcuts import render, redirect\nfrom datawisata.models import Datawisata\n\ndef home(request):\n datawisatas = Datawisata.objects.all()\n \n konteks = {\n 'datawisatas' : datawisatas, \n }\n\n return render(request, 'halamanutama.html', konteks)\n\n\n# def tambah_wisata(request):\n# form = FormDataWisata\n# konteks = {\n# 'form' : form,\n# }\n# return render(request, 'tambahWisata.html', konteks)\n\n\n# def hapus(request,id):\n# hapus = Datawisata.objects.filter(pk=id).delete()\n# return render(request,'halamanutama.html',hapus)\n\n# def edit(request,id):\n# edit = Datawisata.object.get(pk=id)\n# template = edit.html\n# if request.POST :\n# form = FormDataWisata(request.POST, instance=edit)\n# if form.is_valid():\n# form.save()\n# return redirect('edit', id=id)\n# else :\n# form = FormDataWisata(instance=edit)\n# konteks = {\n# 'form' : form,\n# 'edit' : edit,\n# }\n# return render(request, template, konteks)", "sub_path": "wisatajogja/home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datawisata.models.Datawisata.objects.all", "line_number": 5, "usage_type": "call"}, {"api_name": "datawisata.models.Datawisata.objects", "line_number": 5, "usage_type": "attribute"}, {"api_name": "datawisata.models.Datawisata", "line_number": 5, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "98440126", "text": "import asyncio\nimport hashlib\nfrom dill.source import getsource\nfrom itertools import chain\n\nimport pytest\nfrom yarl import URL\n\nfrom aiobotocore.endpoint import ClientResponseProxy\n\nimport aiohttp\nfrom aiohttp.client import ClientResponse\nimport botocore\nfrom botocore.args import ClientArgsCreator\nfrom botocore.client import ClientCreator, BaseClient, Config\nfrom botocore.endpoint import convert_to_response_dict, Endpoint, \\\n EndpointCreator\nfrom botocore.paginate import PageIterator, ResultKeyIterator\nfrom botocore.session import Session, get_session\nfrom botocore.waiter import NormalizedOperationMethod, Waiter, \\\n create_waiter_with_client\nfrom botocore.eventstream import EventStream\nfrom botocore.parsers import ResponseParserFactory, PROTOCOL_PARSERS, \\\n RestXMLParser, EC2QueryParser, QueryParser, JSONParser, RestJSONParser\nfrom botocore.response import StreamingBody\n\n\n# This file ensures that our private patches will work going forward. If a\n# method gets updated this will assert and someone will need to validate:\n# 1) If our code needs to be updated\n# 2) If our minimum botocore version needs to be updated\n# 3) If we need to replace the below hash (not backwards compatible) or add\n# to the set\n\n# The follow is for our monkeypatches for read_timeout:\n# github.com/aio-libs/aiobotocore/pull/248\n_AIOHTTP_DIGESTS = {\n # for using _body\n ClientResponse: {'e178726065b609c69a1c02e8bb78f22efce90792'},\n}\n\n# These are guards to our main patches\n\n# !!! 
README: HOW TO UPDATE THESE !!!\n# -----------------------------------\n# (tests break with new version of aiohttp/botocore)\n#\n# 1) Adding support for more versions of aiohttp/botocore\n# In this scenario you need to ensure that aiobotocore supports the changes\n# that broke these tests along with the old versions of the libraries\n# and APPEND to the set of hashes that we support for each object you\n# validated.\n# 2) Bumping up the base version of aiohttp/botocore that we support\n# In this scenario ensure aiobotocore supports the new version of the libs\n# and REPLACE all entries with the current hashes with the new libs.\n\n# REPLACE = backwards incompatible change\n# APPEND = officially supporting more versions of botocore/aiohttp\n\n# If you're changing these, most likely need to update setup.py as well.\n_API_DIGESTS = {\n # args.py\n ClientArgsCreator.get_client_args: {'e3a44e6f50159e8e31c3d76f5e8a1110dda495fa'},\n\n # client.py\n ClientCreator._create_client_class: {'5e493d069eedbf314e40e12a7886bbdbcf194335'},\n ClientCreator._get_client_args: {'555e1e41f93df7558c8305a60466681e3a267ef3'},\n\n BaseClient._make_api_call: {'0c59329d4c8a55b88250b512b5e69239c42246fb'},\n BaseClient._make_request: {'033a386f7d1025522bea7f2bbca85edc5c8aafd2'},\n BaseClient.get_paginator: {'c69885f5f73fae048c0b93b43bbfcd1f9c6168b8'},\n BaseClient.get_waiter: {'23d57598555bfbc4c6e7ec93406d05771f108d9e'},\n\n # config.py\n Config.merge: {'c3dd8c3ffe0da86953ceba4a35267dfb79c6a2c8'},\n Config: {'2dcc44190a3dc2a4b26ab0ed9410daefcd7c93c1'},\n\n # endpoint.py\n convert_to_response_dict: {'2c73c059fa63552115314b079ae8cbf5c4e78da0'},\n\n Endpoint._send_request: {'50ab33d6f16e75594d01ab1c2ec6b7c7903798db'},\n Endpoint._get_response: {'46c3a8cb4ff7672b75193ce5571dbea48aa9da75'},\n Endpoint._do_get_response: {'df29f099d26dc057834c7b25d3b5217f1f7acbe4'},\n Endpoint._needs_retry: {'0f40f52d8c90c6e10b4c9e1c4a5ca00ef2c72850'},\n Endpoint._send: {'644c7e5bb88fecaa0b2a204411f8c7e69cc90bf1'},\n\n EndpointCreator.create_endpoint: {'36065caa2398573be229bee500e27303bc362348'},\n\n # eventstream.py\n EventStream._create_raw_event_generator: {\n 'cc101f3ca2bca4f14ccd6b385af900a15f96967b'},\n EventStream.__iter__: {'8a9b454943f8ef6e81f5794d641adddd1fdd5248'},\n\n # paginate.py\n PageIterator.__iter__: {'56b3a1e30f488e2f1f5d5309db42fd5ad8a3895d'},\n PageIterator.result_key_iters: {'04d3c647bd98caba3687df80e650fea517a0068e'},\n PageIterator.build_full_result: {'afe8cd8daad2cf32ae34f877985ab79501bf7742'},\n ResultKeyIterator: {'f71d98959ccda5e05e35cf3cf224fbc9310d33bb'},\n\n # parsers.py\n ResponseParserFactory.create_parser: {'5cf11c9acecd1f60a013f6facbe0f294daa3f390'},\n RestXMLParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n EC2QueryParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n QueryParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n JSONParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n RestJSONParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n\n # response.py\n StreamingBody: {'bb4d872649b0c118c9a3d5e44961e1bea92eb79c'},\n\n # session.py\n Session.__init__: {'ccf156a76beda3425fb54363f3b2718dc0445f6d'},\n Session.create_client: {'36f4e718fc4bada66808c2f98fa71835c09076f7'},\n get_session: {'c47d588f5da9b8bde81ccc26eaef3aee19ddd901'},\n\n # waiter.py\n NormalizedOperationMethod.__call__: {'79723632d023739aa19c8a899bc2b814b8ab12ff'},\n Waiter.wait: {'5502a89ed740fb5d6238a6f72a3a08efc1a9f43b'},\n 
create_waiter_with_client: {'c3d12c9a4293105cc8c2ecfc7e69a2152ad564de'},\n}\n\n_PROTOCOL_PARSER_CONTENT = {'ec2', 'query', 'json', 'rest-json', 'rest-xml'}\n\n\n@pytest.mark.moto\ndef test_protocol_parsers():\n # Check that no new parsers have been added\n current_parsers = set(PROTOCOL_PARSERS.keys())\n assert current_parsers == _PROTOCOL_PARSER_CONTENT\n\n\n# NOTE: this doesn't require moto but needs to be marked to run with coverage\n@pytest.mark.moto\ndef test_patches():\n print(\"Botocore version: {} aiohttp version: {}\".format(\n botocore.__version__, aiohttp.__version__))\n\n success = True\n for obj, digests in chain(_AIOHTTP_DIGESTS.items(), _API_DIGESTS.items()):\n digest = hashlib.sha1(getsource(obj).encode('utf-8')).hexdigest()\n if digest not in digests:\n print(\"Digest of {}:{} not found in: {}\".format(\n obj.__qualname__, digest, digests))\n success = False\n\n assert success\n\n\n# NOTE: this doesn't require moto but needs to be marked to run with coverage\n@pytest.mark.moto\n@pytest.mark.asyncio\nasync def test_set_status_code():\n resp = ClientResponseProxy(\n 'GET', URL('http://foo/bar'),\n loop=asyncio.get_event_loop(),\n writer=None, continue100=None, timer=None,\n request_info=None,\n traces=None,\n session=None)\n resp.status_code = 500\n assert resp.status_code == 500\n", "sub_path": "tests/test_patches.py", "file_name": "test_patches.py", "file_ext": "py", "file_size_in_byte": 6532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "aiohttp.client.ClientResponse", "line_number": 39, "usage_type": "name"}, {"api_name": "botocore.args.ClientArgsCreator.get_client_args", "line_number": 63, "usage_type": "attribute"}, {"api_name": "botocore.args.ClientArgsCreator", "line_number": 63, "usage_type": "name"}, {"api_name": "botocore.client.ClientCreator._create_client_class", "line_number": 66, "usage_type": "attribute"}, {"api_name": "botocore.client.ClientCreator", "line_number": 66, "usage_type": "name"}, {"api_name": "botocore.client.ClientCreator._get_client_args", "line_number": 67, "usage_type": "attribute"}, {"api_name": "botocore.client.ClientCreator", "line_number": 67, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient._make_api_call", "line_number": 69, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 69, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient._make_request", "line_number": 70, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 70, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient.get_paginator", "line_number": 71, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 71, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient.get_waiter", "line_number": 72, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 72, "usage_type": "name"}, {"api_name": "botocore.client.Config.merge", "line_number": 75, "usage_type": "attribute"}, {"api_name": "botocore.client.Config", "line_number": 75, "usage_type": "name"}, {"api_name": "botocore.client.Config", "line_number": 76, "usage_type": "name"}, {"api_name": "botocore.endpoint.convert_to_response_dict", "line_number": 79, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._send_request", "line_number": 81, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 81, "usage_type": "name"}, {"api_name": 
"botocore.endpoint.Endpoint._get_response", "line_number": 82, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 82, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._do_get_response", "line_number": 83, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 83, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._needs_retry", "line_number": 84, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 84, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._send", "line_number": 85, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 85, "usage_type": "name"}, {"api_name": "botocore.endpoint.EndpointCreator.create_endpoint", "line_number": 87, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.EndpointCreator", "line_number": 87, "usage_type": "name"}, {"api_name": "botocore.eventstream.EventStream._create_raw_event_generator", "line_number": 90, "usage_type": "attribute"}, {"api_name": "botocore.eventstream.EventStream", "line_number": 90, "usage_type": "name"}, {"api_name": "botocore.eventstream.EventStream.__iter__", "line_number": 92, "usage_type": "attribute"}, {"api_name": "botocore.eventstream.EventStream", "line_number": 92, "usage_type": "name"}, {"api_name": "botocore.paginate.PageIterator.__iter__", "line_number": 95, "usage_type": "attribute"}, {"api_name": "botocore.paginate.PageIterator", "line_number": 95, "usage_type": "name"}, {"api_name": "botocore.paginate.PageIterator.result_key_iters", "line_number": 96, "usage_type": "attribute"}, {"api_name": "botocore.paginate.PageIterator", "line_number": 96, "usage_type": "name"}, {"api_name": "botocore.paginate.PageIterator.build_full_result", "line_number": 97, "usage_type": "attribute"}, {"api_name": "botocore.paginate.PageIterator", "line_number": 97, "usage_type": "name"}, {"api_name": "botocore.paginate.ResultKeyIterator", "line_number": 98, "usage_type": "name"}, {"api_name": "botocore.parsers.ResponseParserFactory.create_parser", "line_number": 101, "usage_type": "attribute"}, {"api_name": "botocore.parsers.ResponseParserFactory", "line_number": 101, "usage_type": "name"}, {"api_name": "botocore.parsers.RestXMLParser._create_event_stream", "line_number": 102, "usage_type": "attribute"}, {"api_name": "botocore.parsers.RestXMLParser", "line_number": 102, "usage_type": "name"}, {"api_name": "botocore.parsers.EC2QueryParser._create_event_stream", "line_number": 103, "usage_type": "attribute"}, {"api_name": "botocore.parsers.EC2QueryParser", "line_number": 103, "usage_type": "name"}, {"api_name": "botocore.parsers.QueryParser._create_event_stream", "line_number": 104, "usage_type": "attribute"}, {"api_name": "botocore.parsers.QueryParser", "line_number": 104, "usage_type": "name"}, {"api_name": "botocore.parsers.JSONParser._create_event_stream", "line_number": 105, "usage_type": "attribute"}, {"api_name": "botocore.parsers.JSONParser", "line_number": 105, "usage_type": "name"}, {"api_name": "botocore.parsers.RestJSONParser._create_event_stream", "line_number": 106, "usage_type": "attribute"}, {"api_name": "botocore.parsers.RestJSONParser", "line_number": 106, "usage_type": "name"}, {"api_name": "botocore.response.StreamingBody", "line_number": 109, "usage_type": "name"}, {"api_name": "botocore.session.Session.__init__", "line_number": 112, "usage_type": "attribute"}, {"api_name": "botocore.session.Session", "line_number": 112, "usage_type": "name"}, 
{"api_name": "botocore.session.Session.create_client", "line_number": 113, "usage_type": "attribute"}, {"api_name": "botocore.session.Session", "line_number": 113, "usage_type": "name"}, {"api_name": "botocore.session.get_session", "line_number": 114, "usage_type": "name"}, {"api_name": "botocore.waiter.NormalizedOperationMethod.__call__", "line_number": 117, "usage_type": "attribute"}, {"api_name": "botocore.waiter.NormalizedOperationMethod", "line_number": 117, "usage_type": "name"}, {"api_name": "botocore.waiter.Waiter.wait", "line_number": 118, "usage_type": "attribute"}, {"api_name": "botocore.waiter.Waiter", "line_number": 118, "usage_type": "name"}, {"api_name": "botocore.waiter.create_waiter_with_client", "line_number": 119, "usage_type": "name"}, {"api_name": "botocore.parsers.PROTOCOL_PARSERS.keys", "line_number": 128, "usage_type": "call"}, {"api_name": "botocore.parsers.PROTOCOL_PARSERS", "line_number": 128, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 125, "usage_type": "attribute"}, {"api_name": "botocore.__version__", "line_number": 136, "usage_type": "attribute"}, {"api_name": "aiohttp.__version__", "line_number": 136, "usage_type": "attribute"}, {"api_name": "itertools.chain", "line_number": 139, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 140, "usage_type": "call"}, {"api_name": "dill.source.getsource", "line_number": 140, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 133, "usage_type": "attribute"}, {"api_name": "aiobotocore.endpoint.ClientResponseProxy", "line_number": 153, "usage_type": "call"}, {"api_name": "yarl.URL", "line_number": 154, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 155, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 151, "usage_type": "attribute"}]} +{"seq_id": "611345819", "text": "from bot_detection import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_selection import RFE\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import precision_recall_fscore_support,precision_score,recall_score,f1_score\nfrom sklearn.preprocessing import MinMaxScaler \nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ndef scale(df):\n X = df.loc[:,:]\n scaler=StandardScaler()\n names=X.columns\n scaled_df = scaler.fit_transform(X) #our real time input\n scaled_df=pd.DataFrame(scaled_df,columns=names)\n return scaled_df\n\n\ndef model_eval(X,y,result):\n X_train, X_test, y_train, y_test = train_test_split(X,np.ravel(y), test_size=0.25,random_state=25)\n model=LogisticRegression(solver='liblinear')\n model.fit(X_train,y_train)\n print(\"----------------Logistic Regression-------------------\")\n p=model.predict(X_test)\n ans=X_test.copy()\n ans[\"Actual\"]=y_test\n ans[\"Predicted\"]=p\n ans1=ans[ans['Actual']==0]\n y_true=ans1[\"Actual\"]\n y_predict=ans1[\"Predicted\"]\n prf=precision_recall_fscore_support(y_true, y_predict, average='micro')\n #print(X_test)\n print('Accuracy : {:.2f}'.format(model.score(X_test, y_test)))\n print('Precision :',precision_score(y_test,p))\n 
print('Recall :',recall_score(y_test,p))\n    print('F1-score :',f1_score(y_test,p))\n    print(\"----------------Logistic Regression-------------------\")\n\n\n\ndef svm(x,y,result):\n    model=SVC()\n    X_train, X_test, y_train, y_test = train_test_split(x,np.ravel(y), test_size=0.25,random_state=15)\n    model.fit(X_train,y_train)\n    print('\\n\\n\\n')\n    print(\"----------------SVM-------------------------\")\n    p=model.predict(X_test)\n    ans=X_test.copy()\n    ans[\"Actual\"]=y_test\n    ans[\"Predicted\"]=p\n    ans1=ans[ans['Actual']==0]\n    y_true=ans1[\"Actual\"]\n    y_predict=ans1[\"Predicted\"]\n    prf=precision_recall_fscore_support(y_true, y_predict, average='micro')\n    print('Accuracy : {:.2f}'.format(model.score(X_test, y_test)))\n    print('Precision :',precision_score(y_test,p))\n    print('Recall :',recall_score(y_test,p))\n    print('F1-score :',f1_score(y_test,p))\n    print(\"----------------SVM-------------------------\")\n    #predict_svm=model.predict(result)\n    #print(\"predicted by svm \", predict_svm)\n\n\ndef random_forest(x,y,result):\n    rf_model = RandomForestClassifier(n_estimators=100, # Number of trees 100 default\n                              # max_features=2, # Num features considered ,\n                              oob_score=True,\n                              min_samples_split=10, #min no of samples required to split the tree\n                              max_depth=100, #default is none or until min_samples condition is met\n                              max_features='auto',\n                             # bootstrap=False # if false, whole dataset is used for each tree\n                              )\n    X_train, X_test, y_train, y_test = train_test_split(x,np.ravel(y), test_size=0.25,random_state=15)\n    rf_model.fit(X_train,y_train)\n    print('\\n')\n    print(\"----------------Random forest-------------------\")\n    p=rf_model.predict(X_test)\n    ans=X_test.copy()\n    ans[\"Actual\"]=y_test\n    ans[\"Predicted\"]=p\n#    print(ans)\n    ans1=ans[ans['Actual']==0]\n    y_true=ans1[\"Actual\"]\n    y_predict=ans1[\"Predicted\"]\n    prf=precision_recall_fscore_support(y_test, p, average='micro')\n    #print(X_test)\n    print('Accuracy : {:.2f}'.format(rf_model.score(X_test, y_test)))\n    print('Precision :',precision_score(y_test,p))\n    print('Recall :',recall_score(y_test,p))\n    print('F1-score :',f1_score(y_test,p))\n    print(\"----------------Random forest-------------------\")\n    prediction=rf_model.predict(result)\n    print(\"\\nPrediction : \",prediction[0])\n    if(prediction[0]==1):\n        user='Spam'\n    elif(prediction[0]==0):\n        user='Not Spam'\n    print(\"Final User Result : \",user)\n    #print('Accuracy : '.accuracy_score(y_test,predict1))\n    #print(\"predicted by random forest \", predict1)\n\ndef rfe_model(df,y,result):\n    X_train, X_test, y_train, y_test = train_test_split(df,np.ravel(y), test_size=0.25,random_state=25)\n    model=LogisticRegression(solver='liblinear')\n    rfe=RFE(model,18)\n    rfe = rfe.fit(X_train,y_train)\n    print(rfe.support_)\n    print(rfe.ranking_)\n    cols=[\"description\",\"verified\",\"age\",\"followers\",\"mentions\",\"url_ratio\",\"hashtag\",\"retweet_rate\",\"mean_of_intertweet_delay\",\"SD\",\"avg_tweets_day\",\"avg_tweeets_week\"]\n    new_df=pd.DataFrame()\n    for col in cols:\n        new_df[col]=df[col]\n    X_train, X_test, y_train, y_test = train_test_split(new_df,np.ravel(y), test_size=0.25,random_state=25)\n    model=LogisticRegression(solver='liblinear')\n    model.fit(X_train,y_train)\n    model_eval(new_df,y,result)\n    svm(new_df,y,result)\n    random_forest(new_df,y,result)\n\n\n#__main__\ndata=pd.read_csv('tweet_info.txt',sep=\" 
\",header=None)\ndata.columns=[\"description\",\"verified\",\"age\",\"following\",\"followers\",\"reputation\",\"mentions\",\"unique_mentions\",\"url_ratio\",\"hashtag\",\"content_similarity\",\"retweet_rate\",\"reply_rate\",\"no_of_tweets\",\"mean_of_intertweet_delay\",\"SD\",\"avg_tweets_day\",\"avg_tweeets_week\",\"s1\",\"s2\",\"fofo\",\"following_rate\",\"0-3\",\"3-6\",\"6-9\",\"9-12\",\"12-15\",\"15-18\",\"18-21\",\"21-24\"]\nlabel=pd.read_csv(\"label.txt\",header=None)\ndata[\"label\"]=label\n\nprint(data)\n\nX = data.loc[:, data.columns != 'label']\ny = data.loc[:, data.columns == 'label']\n\n\nscaler=StandardScaler()\ncols=[\"description\",\"verified\",\"age\",\"followers\",\"mentions\",\"url_ratio\",\"hashtag\",\"retweet_rate\",\"mean_of_intertweet_delay\",\"SD\",\"avg_tweets_day\",\"avg_tweeets_week\"]\nnew_df=pd.DataFrame()\nfor col in cols:\n new_df[col]=X[col]\nprint(new_df)\n\nrfe_model(new_df,y,extract_df)\n\nscaled_df = scaler.fit_transform(new_df)\nscaled_df=pd.DataFrame(scaled_df,columns=cols)\nresult=scaler.transform(extract_df)\nprint(\"\\n\\nAfter Scaling\\nresult is \", result)\nrfe_model(scaled_df,y,result)\n\n#def svm(x,y):\n# model=SVC()\n# X_train, X_test, y_train, y_test = train_test_split(x,np.ravel(y), test_size=0.25,random_state=15)\n# model.fit(X_train,y_train)\n# pred = model.predict(X_test)\n# print(confusion_matrix(y_test, pred))\n# print(classification_report(y_test, pred))\n\n#svm(scaled_df,np.ravel(y))\n\n#random_forest(scaled_df,np.ravel(y),result)\n\n#svm(new_df,np.ravel(y))\n\n#%matplotlib inline\nmatplotlib.style.use('ggplot')\nnp.random.seed(1)\ndf = pd.DataFrame({\n 'logistic_reg': np.random.normal(0, 2, 10000),\n 'svm': np.random.normal(5, 3, 10000),\n 'random_forest': np.random.normal(-5, 5, 10000)\n})\n\nscaler = preprocessing.StandardScaler()\nscaled_df = scaler.fit_transform(df)\nscaled_df = pd.DataFrame(scaled_df, columns=['logistic_reg', 'svm', 'random_forest'])\nimport seaborn as sns\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5))\nax1.set_title('Before Scaling')\nsns.kdeplot(df['logistic_reg'], ax=ax1)\nsns.kdeplot(df['svm'], ax=ax1)\nsns.kdeplot(df['random_forest'], ax=ax1)\nax2.set_title('After Scaleing')\nsns.kdeplot(scaled_df['logistic_reg'], ax=ax2)\nsns.kdeplot(scaled_df['svm'], ax=ax2)\nsns.kdeplot(scaled_df['random_forest'], ax=ax2)\nplt.show()\n\n\n", "sub_path": "twitter_analysis_final.py", "file_name": "twitter_analysis_final.py", "file_ext": "py", "file_size_in_byte": 7540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 52, 
"usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 82, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 144, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.style.use", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 180, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "seaborn.kdeplot", "line_number": 186, "usage_type": "call"}, 
{"api_name": "seaborn.kdeplot", "line_number": 187, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 188, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 190, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 191, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}]} +{"seq_id": "496499434", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 8 19:49:25 2021\n\n@author: user\n\"\"\"\nimport keras\nfrom keras.layers import Input,Lambda,Dense,Flatten\nfrom keras.models import Model\n#from keras.applications.vgg19 import VGG19\nfrom keras.applications.xception import Xception as xc\nfrom keras_applications.xception import preprocess_input\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nimport numpy as np\nfrom glob import glob\n#spacifying the path of images\ntrain_path='Datasets/train'\ntest_path='Datasets/test'\n\nIMAGW_SIZE=[224,224]\n#creating the obgetct of resnet50 class\nxcep=xc(input_shape=IMAGW_SIZE+[3],weights='imagenet',include_top=False)\nfor layers in xcep.layers:\n layers.trainable=False\n \n \n \nfolder=glob('Datasets/train/*') \nx=Flatten()(xcep.output)\nprediction=Dense(2,activation='softmax')(x)\nmodel=Model(inputs=xcep.input, outputs=prediction)\n\n\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\ntrain_datgen=ImageDataGenerator(rescale=1./255,zoom_range=0.2,shear_range=0.2,horizontal_flip=True)\ntest_datagen=ImageDataGenerator(rescale=1./255)\n\n\n\ntrainset=train_datgen.flow_from_directory('Datasets/train',\n target_size=(224,224),\n batch_size=32,\n class_mode='categorical')\ntestset=test_datagen.flow_from_directory('Datasets/test',\n target_size=(224,224),\n batch_size=32,\n class_mode='categorical')\n\n\nt=model.fit_generator(trainset,\n validation_data=testset,\n epochs=2,\n steps_per_epoch=len(trainset),\n validation_steps=len(testset)\n )\n\n\n\n\n\n\nimport tensorflow as tf\nfrom keras.models import load_model\n\n\nmodel.save('face_detection_model.h5')\n\n\n\n\n\n", "sub_path": "face_detection_model.py", "file_name": "face_detection_model.py", "file_ext": "py", "file_size_in_byte": 2037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.applications.xception.Xception", "line_number": 24, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "533778675", "text": "import os\nimport torch\n\nfrom skimage import io, transform\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport math\nfrom cropextract import *\nfrom torch.utils.data import Dataset, DataLoader\nimport scipy.misc as smi\nimport imageio\n\nimport scipy.io as sio\nimport scipy.misc as smi\nimport pickle\nfrom matplotlib import pyplot as 
plt\n\nclass PlanarPatchDataset(Dataset):\n def __init__(self, csv_file, root_dir, transform=None):\n f = open(os.path.join(root_dir, 'train.pkl'), 'rb')\n self.landmarks_frame = pickle.load(f)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.landmarks_frame)\n #return 256\n\n def __getitem__(self, idx):\n\n a = str(self.landmarks_frame[idx][1][0])\n p = str(self.landmarks_frame[idx][1][1])\n n = str(self.landmarks_frame[idx][1][2])\n\n rgb_anchor = os.path.join(self.root_dir, 'images/'+a+'.rgb.npy')\n normal_anchor = os.path.join(self.root_dir, 'images/'+a+'.n.npy')\n mask_anchor = os.path.join(self.root_dir, 'planes/' + self.landmarks_frame[idx][0][0]+'.npy')\n\n rgb_positive = os.path.join(self.root_dir, 'images/'+ p + '.rgb.npy')\n normal_positive = os.path.join(self.root_dir, 'images/'+ p + '.n.npy')\n mask_positive = os.path.join(self.root_dir, 'planes/' + self.landmarks_frame[idx][0][1]+'.npy')\n\n rgb_negative = os.path.join(self.root_dir, 'images/'+n + '.rgb.npy')\n normal_negative = os.path.join(self.root_dir, 'images/'+n + '.n.npy')\n mask_negative = os.path.join(self.root_dir, 'planes/' + self.landmarks_frame[idx][0][2]+'.npy')\n\n\n\n\n size = (240, 320)\n # rgb_anchor_image = cv2.resize(imageio.imread(rgb_anchor), dsize=size, interpolation=cv2.INTER_LINEAR)\n # normal_anchor_image = cv2.resize(imageio.imread(normal_anchor), dsize=size, interpolation=cv2.INTER_LINEAR)\n # mask_anchor_image = cv2.resize(imageio.imread(mask_anchor), dsize=size, interpolation=cv2.INTER_LINEAR)\n # rgb_negative_image = cv2.resize(imageio.imread(rgb_negative), dsize=size, interpolation=cv2.INTER_LINEAR)\n # normal_negative_image = cv2.resize(imageio.imread(normal_negative), dsize=size, interpolation=cv2.INTER_LINEAR)\n # mask_negative_image = cv2.resize(imageio.imread(mask_negative), dsize=size, interpolation=cv2.INTER_LINEAR)\n # rgb_positive_image = cv2.resize(imageio.imread(rgb_positive), dsize=size, interpolation=cv2.INTER_LINEAR)\n # normal_positive_image = cv2.resize(imageio.imread(normal_positive), dsize=size, interpolation=cv2.INTER_LINEAR)\n # mask_positive_image = cv2.resize(imageio.imread(mask_positive), dsize=size, interpolation=cv2.INTER_LINEAR)\n\n rgb_anchor_image = np.load(rgb_anchor)\n normal_anchor_image = np.load(normal_anchor)\n mask_anchor_image = np.load(mask_anchor)\n rgb_negative_image = np.load(rgb_negative)\n normal_negative_image = np.load(normal_negative)\n mask_negative_image = np.load(mask_negative)\n rgb_positive_image = np.load(rgb_positive)\n normal_positive_image = np.load(normal_positive)\n mask_positive_image = np.load(mask_positive)\n\n # m = 0.1 * np.stack((mask_anchor_image.astype(np.uint8), np.zeros(size), np.zeros(size)) , axis=-1)\n # plt.imshow( normal_anchor_image + m.astype(np.int))\n # plt.show()\n #\n # m = 0.1 * np.stack((mask_positive_image.astype(np.uint8), np.zeros(size), np.zeros(size)), axis=-1)\n # plt.imshow(normal_positive_image + m.astype(np.int))\n # plt.show()\n #\n # m = 0.1 * np.stack((mask_negative_image.astype(np.uint8), np.zeros(size), np.zeros(size)), axis=-1)\n # plt.imshow(normal_negative_image + m.astype(np.int))\n # plt.show()\n\n #plt.imshow(np.ma.array(mask_anchor_image, mask=~mask_anchor_image))\n\n\n sample = {\n 'rgb_anchor_image': rgb_anchor_image,\n 'normal_anchor_image': normal_anchor_image,\n 'mask_anchor_image': mask_anchor_image,\n 'rgb_negative_image': rgb_negative_image,\n 'normal_negative_image': normal_negative_image,\n 'mask_negative_image': mask_negative_image,\n 
'rgb_positive_image': rgb_positive_image,\n 'normal_positive_image': normal_positive_image,\n 'mask_positive_image': mask_positive_image\n }\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n rgb_anchor_image,\\\n normal_anchor_image,\\\n mask_anchor_image,\\\n rgb_negative_image,\\\n normal_negative_image,\\\n mask_negative_image,\\\n rgb_positive_image,\\\n normal_positive_image,\\\n mask_positive_image = \\\n sample['rgb_anchor_image'],\\\n sample['normal_anchor_image'],\\\n sample['mask_anchor_image'],\\\n sample['rgb_negative_image'],\\\n sample['normal_negative_image'],\\\n sample['mask_negative_image'],\\\n sample['rgb_positive_image'],\\\n sample['normal_positive_image'],\\\n sample['mask_positive_image']\n\n\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n\n rgb_anchor_image = rgb_anchor_image.astype(np.float).transpose((2, 0, 1)) - 128\n rgb_negative_image = rgb_negative_image.astype(np.float).transpose((2, 0, 1)) - 128\n rgb_positive_image = rgb_positive_image.astype(np.float).transpose((2, 0, 1)) - 128\n\n normal_anchor_image = normal_anchor_image.astype(np.float).transpose((2, 0, 1)) - 128\n normal_negative_image = normal_negative_image.astype(np.float).transpose((2, 0, 1)) - 128\n normal_positive_image = normal_positive_image.astype(np.float).transpose((2, 0, 1)) - 128\n\n mask_anchor_image = mask_anchor_image.astype(np.float) - 128\n mask_negative_image = mask_negative_image.astype(np.float) - 128\n mask_positive_image = mask_positive_image.astype(np.float) - 128\n\n return {\n 'rgb_anchor_image': torch.from_numpy(rgb_anchor_image),\n 'normal_anchor_image': torch.from_numpy(normal_anchor_image),\n 'mask_anchor_image': torch.from_numpy(mask_anchor_image),\n 'rgb_negative_image': torch.from_numpy(rgb_negative_image),\n 'normal_negative_image': torch.from_numpy(normal_negative_image),\n 'mask_negative_image': torch.from_numpy(mask_negative_image),\n 'rgb_positive_image': torch.from_numpy(rgb_positive_image),\n 'normal_positive_image': torch.from_numpy(normal_positive_image),\n 'mask_positive_image': torch.from_numpy(mask_positive_image)\n }\n\n", "sub_path": "dataset_planematch.py", "file_name": "dataset_planematch.py", "file_ext": "py", "file_size_in_byte": 6930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 22, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, 
"usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "519944723", "text": "import threading\nimport time\nimport modbus_poller\n#import socket\nimport PID\nimport socketio\nimport datetime\nimport pandas as pd\n\ndef run_item(f, item):\n result_info = [threading.Event(), None]\n def runit():\n result_info[1] = f(item)\n result_info[0].set()\n threading.Thread(target=runit).start()\n return result_info\n\ndef gather_results(result_infos):\n results = [] \n for i in range(len(result_infos)):\n result_infos[i][0].wait()\n results.append(result_infos[i][1])\n return results\n\nclass ModbusValue:\n def __init__(self, name, unitId, registers, scalefactor):\n self.name = name\n self.unitId = unitId\n self.registers = registers\n self.scalefactor = scalefactor\n\ndef proc(item):\n c = modbus_poller.initModbusDevice('84.9.41.168', 502, item.unitId, True)\n val = modbus_poller.getHoldingRegisterValue(c, item.registers[0], 1, item.scalefactor)\n return 
modbus_poller.convertToSignedInt(val[0])/item.scalefactor\n\n#database connection details\n# HOST = 'localhost'\n# PORT = 9009\n# sock= socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Initialise WebSocket\nsio = socketio.Client()\n\n@sio.on('my message')\ndef on_message(data):\n print('I received a message!')\n\n@sio.event\ndef connect():\n print(\"I'm connected!\")\n\n@sio.event\ndef connect_error(data):\n print(\"The connection failed!\")\n\n@sio.event\ndef disconnect():\n print(\"I'm disconnected!\")\n\nsio.connect('http://localhost:8080')\nprint('my sid is', sio.sid)\n\n#pid parameters\nP = 1.4\nI = 1\nD = 0.001\npid = PID.PID(P,I,D)\n\n#initialise system parameters\nsolar_inverter = ModbusValue('solar_inverter_W', 32, [1029], 1)\nsolar_charger = ModbusValue('solar_charger_W', 245, [789], 10)\nac_loads = ModbusValue('ac_loads_W', 100, [817], 1)\nsoc = ModbusValue('soc_W', 100, [843], 1)\nchargeState = ModbusValue('soc_W', 100, [844], 1)\ngrid = ModbusValue('grid_W', 31, [2600], 1)\nreal_grid_sp = ModbusValue('real_grid_sp_W', 246, [37], 1)\nbattery = ModbusValue('battery', 100, [842], 1)\nbatteryVoltage = ModbusValue('battery', 100, [840], 10)\nbatteryCurrent = ModbusValue('battery', 100, [841], 10)\nbatteryTemperature = ModbusValue('battery', 225, [262], 10)\n\n\nmodbusDevices = [solar_inverter, solar_charger, ac_loads, grid, real_grid_sp, \nsoc, chargeState, battery, batteryVoltage, batteryCurrent, batteryTemperature]\nmodbusValues = gather_results([run_item(proc, item) for item in modbusDevices])\n\npropertyLoadW = modbusValues[2]\npVGenerationW = modbusValues[0] + modbusValues[1]\nsetPointW = propertyLoadW - pVGenerationW\n\nfeedback = 0\npid.SetPoint = setPointW\npid.setSampleTime(0.01)\n\ntry:\n #connect to database\n #sock.connect((HOST, PORT))\n \n i = 0\n while True:\n i = i + 1\n modbusValues = gather_results([run_item(proc, item) for item in modbusDevices])\n propertyLoadW = modbusValues[2] \n pVGenerationW = modbusValues[0] + modbusValues[1]\n setPointW = propertyLoadW - pVGenerationW\n \n pid.update(feedback)\n output = pid.output\n \n feedback += (output - (1 / i))\n \n pid.SetPoint = setPointW\n \n #push readings to database\n #dataStr = gridImportExportW_real=' + str(modbusValues[3])\n \n timeNow = str(datetime.datetime.now())\n dataStr = { 'sensor': [\n { 'name': 'propertyLoad', 'point': { 'timestamp': timeNow, 'value': str(propertyLoadW) } },\n { 'name': 'pVGeneration', 'point': { 'timestamp': timeNow, 'value': str(pVGenerationW) } },\n { 'name': 'setPointSimulated', 'point': { 'timestamp': timeNow, 'value': str(setPointW) } },\n { 'name': 'setPointReal', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[4]) } },\n { 'name': 'gridSimulated', 'point': { 'timestamp': timeNow, 'value': str(feedback) } },\n { 'name': 'gridReal', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[3]) } },\n { 'name': 'soc', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[5]) } },\n { 'name': 'chargeState', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[6]) }},\n { 'name': 'battery', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[7]) }},\n { 'name': 'batteryVoltage', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[8]) }},\n { 'name': 'batteryCurrent', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[9]) }},\n { 'name': 'batteryTemperature', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[10]) }}\n ]} \n\n\n currentTime= datetime.datetime.now()\n sio.emit('my message', {'data': dataStr })\n \n 
#sock.sendall(dataStr.encode())\n time.sleep(1)\n \nexcept Exception as e:\n print(\"Got error: %s\" % (e))\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#simulated_grid_sp = ModbusDevice()\n#print(gather_results([run_item(proc, item) for item in [1, 2, 10, 100]]))\n", "sub_path": "Contoller/real-time-graph.py", "file_name": "real-time-graph.py", "file_ext": "py", "file_size_in_byte": 4789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "threading.Event", "line_number": 11, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 15, "usage_type": "call"}, {"api_name": "modbus_poller.initModbusDevice", "line_number": 33, "usage_type": "call"}, {"api_name": "modbus_poller.getHoldingRegisterValue", "line_number": 34, "usage_type": "call"}, {"api_name": "modbus_poller.convertToSignedInt", "line_number": 35, "usage_type": "call"}, {"api_name": "socketio.Client", "line_number": 43, "usage_type": "call"}, {"api_name": "PID.PID", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "185650775", "text": "\nfrom modules.PreactDoubleLayer import PreactDoubleLayer\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import *\nimport torch.optim as optim\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nvFeat = [4, 4]\nnChan = vFeat[0]\nnClasses = 5\n\n# random batch\nx = normalInit([10, nChan, 32, 32]).to(device) # (numImages, numChans, image height, image width)\nW = normalInit([nChan + 1, nClasses]).to(device) # plus one for the bias\nlabels = torch.LongTensor([1, 2, 3, 4, 3, 2, 1, 0, 2, 3]).to(device)\nKconnect = normalInit([nChan, nChan, 1, 1]).to(device)\n\n# ----------------------------------------------------------------------\n# new approach\nparamsStruct = {'normLayer1': nn.BatchNorm2d(num_features=nChan),\n 'normLayer2': nn.BatchNorm2d(num_features=nChan),\n 'conv1': nn.Conv2d(in_channels=nChan, out_channels=nChan, kernel_size=3, padding=1, stride=1),\n 'conv2': nn.Conv2d(in_channels=nChan, out_channels=nChan, kernel_size=3, padding=1, stride=1)}\n\nnet = PreactDoubleLayer(vFeat, params=paramsStruct)\nnet.to(device)\norigK1 = net.conv1.weight.data.clone() # for old method\norigK2 = net.conv2.weight.data.clone() # for old method\nK1 = nn.Parameter(origK1.clone())\nK2 = nn.Parameter(origK2.clone())\noptimizer = optim.SGD(net.parameters(), lr=1e-1, momentum=0.9, weight_decay=0, nesterov=False)\n\noptimizer.zero_grad()\ny1 = net.forward(x)\ny1 = F.avg_pool2d(y1, x.shape[2:4])\n\nloss1, _ = misfitW(y1, W, labels, device)\nloss1.backward()\noptimizer.step()\n\n# ----------------------------------------------------------------------\ndef compareFunc(x, K1, K2 ,device): # functional Preactivated DoubleLayer\n z = F.relu(x)\n z = conv3x3(z, K1)\n z = F.batch_norm(z, running_mean=torch.zeros(K1.size(0) ,device=device),\n running_var=torch.ones(K1.size(0) ,device=device), training=True)\n z = F.relu(z)\n z = conv3x3(z, K2)\n z = F.batch_norm(z, running_mean=torch.zeros(K2.size(0), device=device),\n running_var=torch.ones(K2.size(0), device=device), training=True)\n return 
z\n# old method\noptimParams = [{'params': K1}, {'params':K2}]\nnWeights = 0\noptimizer = optim.SGD(optimParams, lr=1e-1, momentum=0.9, weight_decay=0, nesterov=False)\n\noptimizer.zero_grad()\n\ny2 = compareFunc(x, K1,K2, device)\n\ny2 = F.avg_pool2d(y2, x.shape[2:4])\n\nloss2, _ = misfitW(y2, W, labels, device)\nloss2.backward()\noptimizer.step()\n\n# ----------------------------------------------------------------------\n\n# print('layer 2-norm difference:', torch.norm(y2 - y1, p=2).data) # want = 0\n# print('loss 2-norm difference: ', torch.norm(loss2 - loss1, p=2).data) # want = 0\n# print('K1 2-norm difference:', torch.norm(net.conv1.weight.data - K1.data, p=2).data) # want = 0\n# print('K2 2-norm difference:', torch.norm(net.conv2.weight.data - K2.data, p=2).data) # want = 0\n# print('K1 update: ',torch.norm(origK1 - K1.data, p=2).data) # want > 0\n# print('K2 update: ',torch.norm(origK2 - K2.data, p=2).data) # want > 0\n\ntol = 1e-5\nassert(torch.norm(y2 - y1, p=2).data < tol)\nassert(torch.norm(loss2 - loss1, p=2).data < tol)\nassert( torch.norm(net.conv1.weight.data - K1.data, p=2).data < tol )\nassert( torch.norm(net.conv2.weight.data - K2.data, p=2).data < tol )\n\nassert( torch.norm(origK1 - K1.data, p=2).data > 1e-4)\nassert( torch.norm(origK2 - K2.data, p=2).data > 1e-4)\nprint('tests passed')\n", "sub_path": "modules/testPreactDoubleLayer.py", "file_name": "testPreactDoubleLayer.py", "file_ext": "py", "file_size_in_byte": 3521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.device", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "modules.PreactDoubleLayer.PreactDoubleLayer", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.functional.batch_norm", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 49, "usage_type": "call"}, 
{"api_name": "torch.ones", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.functional.batch_norm", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "447220802", "text": "import copy\nimport os\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import connections\nfrom django.test.client import RequestFactory, Client\n\ntry:\n from django.test.testcases import LiveServerThread\n LIVE_SERVER_SUPPORT = True\nexcept ImportError:\n LIVE_SERVER_SUPPORT = False\n\n\ndef pytest_funcarg__client(request):\n \"\"\"\n Returns a Django test client instance.\n \"\"\"\n return Client()\n\n\ndef pytest_funcarg__admin_client(request):\n \"\"\"\n Returns a Django test client logged in as an admin user.\n \"\"\"\n\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n user = User.objects.create_user('admin', 'admin@example.com',\n 'password')\n user.is_staff = True\n user.is_superuser = True\n user.save()\n\n client = Client()\n client.login(username='admin', password='password')\n\n return client\n\n\ndef pytest_funcarg__rf(request):\n \"\"\"\n Returns a RequestFactory instance.\n \"\"\"\n return RequestFactory()\n\n\ndef pytest_funcarg__settings(request):\n \"\"\"\n Returns a Django settings object that restores any changes after the test\n has been run.\n \"\"\"\n old_settings = copy.deepcopy(settings)\n\n def restore_settings():\n for setting in dir(old_settings):\n if setting == setting.upper():\n setattr(settings, setting, getattr(old_settings, setting))\n request.addfinalizer(restore_settings)\n return settings\n\n\nclass LiveServer(object):\n def __init__(self, host, possible_ports):\n\n connections_override = {}\n\n for conn in connections.all():\n # If using in-memory sqlite databases, pass the connections to\n # the server thread.\n if (conn.settings_dict['ENGINE'] == 'django.db.backends.sqlite3'\n and conn.settings_dict['NAME'] == ':memory:'):\n # Explicitly enable thread-shareability for this connection\n conn.allow_thread_sharing = True\n connections_override[conn.alias] = conn\n\n self.thread = LiveServerThread(host, possible_ports, connections_override)\n self.thread.daemon = True\n self.thread.start()\n\n self.thread.is_ready.wait()\n\n if self.thread.error:\n raise self.thread.error\n\n def __unicode__(self):\n return 'http://%s:%s' % (self.thread.host, self.thread.port)\n\n def __repr__(self):\n return '' % 
unicode(self)\n\n def __add__(self, other):\n # Support string concatenation\n return unicode(self) + other\n\n\ndef get_live_server_host_ports():\n # This code is copy-pasted from django/test/testcases.py\n\n specified_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')\n\n # The specified ports may be of the form '8000-8010,8080,9200-9300'\n # i.e. a comma-separated list of ports or ranges of ports, so we break\n # it down into a detailed list of all possible ports.\n possible_ports = []\n try:\n host, port_ranges = specified_address.split(':')\n for port_range in port_ranges.split(','):\n # A port range can be of either form: '8000' or '8000-8010'.\n extremes = map(int, port_range.split('-'))\n assert len(extremes) in [1, 2]\n if len(extremes) == 1:\n # Port range of the form '8000'\n possible_ports.append(extremes[0])\n else:\n # Port range of the form '8000-8010'\n for port in range(extremes[0], extremes[1] + 1):\n possible_ports.append(port)\n except Exception:\n raise Exception('Invalid address (\"%s\") for live server.' % specified_address)\n\n return (host, possible_ports)\n\n\ndef pytest_funcarg__live_server(request):\n if not LIVE_SERVER_SUPPORT:\n raise Exception('The kwarg liveserver is not supported in Django <= 1.3')\n\n def setup_live_server():\n return LiveServer(*get_live_server_host_ports())\n\n def teardown_live_server(live_server):\n live_server.thread.join()\n\n return request.cached_setup(setup=setup_live_server, teardown=teardown_live_server, scope='session')\n", "sub_path": "pytest_django/funcargs.py", "file_name": "funcargs.py", "file_ext": "py", "file_size_in_byte": 4241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.test.client.Client", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 30, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 31, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 37, "usage_type": "call"}, {"api_name": "django.test.client.RequestFactory", "line_number": 47, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 55, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.connections.all", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 70, "usage_type": "name"}, {"api_name": "django.test.testcases.LiveServerThread", "line_number": 79, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 102, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 102, "usage_type": "attribute"}]} 
+{"seq_id": "189718976", "text": "# -*- coding: utf-8 -*-\nimport os\n\nimport xlrd\nimport xlsxwriter\n\nexcel_path = 'p_files'\n\n\ndef load_p_data():\n p_scores = []\n for root, dir, files in os.walk(excel_path):\n for item in files:\n p_scores.append(read_excel(os.path.join(root, item)))\n return p_scores\n\n\ndef read_excel(path):\n try:\n workbook = xlrd.open_workbook(path, encoding_override='utf-8')\n target_sheet = workbook.sheets()[0]\n name = target_sheet.cell(1, 2).value\n emp_no = target_sheet.cell(1, 4).value\n line_manager = target_sheet.cell(2, 6).value\n employee = Employee(name, emp_no, line_manager)\n overall_self_assessment = \"%.3f\" % target_sheet.cell(11, 7).value\n overall_line_mgr_assessment = \"%.3f\" % target_sheet.cell(11, 8).value\n p_score = PScore(employee=employee, overall_self_assessment=overall_self_assessment,\n overall_line_mgr_assessment=overall_line_mgr_assessment)\n\n for row_index in range(4, 11):\n eval_item_detail = EvalItemDetail()\n eval_item_detail.item = target_sheet.cell(row_index, 1).value\n if not eval_item_detail.item.strip():\n break\n eval_item_detail.weight = \"%.2f\" % target_sheet.cell(row_index, 3).value\n eval_item_detail.self_desc = target_sheet.cell(row_index, 4).value\n eval_item_detail.self_assessment = \"%.2f\" % target_sheet.cell(row_index, 7).value\n eval_item_detail.line_mgr_assessment = \"%.2f\" % target_sheet.cell(row_index, 8).value\n p_score.add_eval_item(eval_item_detail)\n return p_score\n\n except Exception as e:\n print(\"error \", e)\n\n\ndef write_stats_to_excel(path, p_scores):\n wb = xlsxwriter.Workbook(path)\n work_sheet = wb.add_worksheet()\n headers = [\"名字\", \"主管\", \"自评\", \"主管评\"]\n for index, header in enumerate(headers):\n work_sheet.write(0, index, header)\n\n for index, p_score in enumerate(p_scores):\n if p_score:\n work_sheet.write(index, 0, p_score.employee.name)\n work_sheet.write(index, 1, p_score.employee.line_manager)\n work_sheet.write(index, 2, p_score.overall_self_assessment)\n work_sheet.write(index, 3, p_score.overall_line_mgr_assessment)\n\n wb.close()\n\n\nclass Employee:\n def __init__(self, name, emp_no, line_manager):\n self.name = name\n self.emp_no = emp_no\n self.line_manager = line_manager\n\n def __repr__(self, *args, **kwargs):\n return (\n \"{name}-{emp_no}-{line_manager}\".\n format(name=self.name, emp_no=self.emp_no, line_manager=self.line_manager))\n\n\nclass PScore:\n def __init__(self, employee, overall_self_assessment, overall_line_mgr_assessment):\n self.employee = employee\n self.overall_self_assessment = overall_self_assessment\n self.overall_line_mgr_assessment = overall_line_mgr_assessment\n self.eval_item_details = []\n\n def add_eval_item(self, eval_item_detail):\n self.eval_item_details.append(eval_item_detail)\n return self\n\n\nclass EvalItemDetail:\n def __init__(self):\n self.item = ''\n self.weight = ''\n self.self_desc = ''\n self.self_assessment = ''\n self.line_mgr_assessment = ''\n\n def __repr__(self):\n return \"_\".join([self.item, self.weight, self.self_desc, self.self_assessment, self.line_mgr_assessment])\n\n\nif __name__ == '__main__':\n write_stats_to_excel(\"all.xlsx\", load_p_data())\n", "sub_path": "get_shit_done/real_useful_project/p-view/p_stats.py", "file_name": "p_stats.py", "file_ext": "py", "file_size_in_byte": 3520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 20, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "332429417", "text": "from __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport argparse\nimport functools\n\n# On Python3, StringIO can come from standard library io:\nfrom ruamel.yaml.compat import StringIO\nimport ruamel.yaml\n\n\ndef repository_specific_formatting(empty_line_top_level, yaml_string):\n '''Transform function to ruamel.yaml's dump function. Makes sure there are\n only empty lines inbetween different top level keys (if empty_line_top_level\n is True, otherwise no empty lines).\n '''\n\n yaml_string = re.sub(r'\\n+', r'\\n', yaml_string) # Remove all empty lines\n\n if empty_line_top_level:\n yaml_string = re.sub( # Add one empty line between each package\n r'(\\n[^\\#][^\\n]*)\\n([^\\s])', r'\\1\\n\\n\\2', yaml_string\n )\n\n return yaml_string\n\n\ndef is_repository(config):\n '''Returns `False` if the configuration corresponds to a Komodo release\n (all config elements below top level key are strings). Returns `True` if\n it corresponds to a _repository_ (all config elements below top level key are\n themselves dictionaries).\n\n Raises ValueError if inconsistent throughout the config.\n '''\n\n # For Python2+3 compatibility. On Python3-only, use isinstance(x, str)\n # instead of isinstance(x, basestring).\n try:\n basestring\n except NameError:\n basestring = str # No basestring on Python3\n\n if all([isinstance(package, basestring) for package in config.values()]):\n return False\n elif all([isinstance(package, ruamel.yaml.comments.CommentedMap)\n for package in config.values()]):\n return True\n\n raise ValueError(\n 'Inconsistent configuration file. '\n 'Not able to detect if it is a release or repository.'\n )\n\n\ndef prettier(yaml_input_string):\n '''Takes in a string corresponding to a YAML Komodo configuration, and returns\n the corresponding prettified YAML string.'''\n\n ruamel_instance = ruamel.yaml.YAML()\n ruamel_instance.indent( # Komodo prefers two space indendation\n mapping=2, sequence=4, offset=2\n )\n ruamel_instance.width = 1000 # Avoid ruamel wrapping long\n\n try:\n config = ruamel_instance.load(yaml_input_string)\n except ruamel.yaml.constructor.DuplicateKeyError as e:\n raise SystemExit(str(e))\n\n komodo_repository = is_repository(config)\n\n # On Python3.6+, sorted_config can just be an\n # ordinary dict as insertion order is then preserved.\n sorted_config = ruamel.yaml.comments.CommentedMap()\n for package in sorted(config, key=str.lower):\n sorted_config[package] = config[package]\n\n setattr(sorted_config, ruamel.yaml.comments.comment_attrib, config.ca)\n\n yaml_output = StringIO()\n ruamel_instance.dump(\n sorted_config,\n yaml_output,\n transform=functools.partial(repository_specific_formatting, komodo_repository)\n )\n\n if sys.version_info < (3, 0):\n # Need to encode the byte-string on Python2\n return yaml_output.getvalue().encode('utf-8')\n\n return yaml_output.getvalue()\n\n\ndef prettified_yaml(filepath, check_only=True):\n '''Returns `True` if the file is already \"prettified\", `False` otherwise.\n If `check_only` is False, the input file will be \"prettified\" in place if necessary.\n '''\n\n print('Checking {}... 
'.format(filepath), end='')\n\n with open(filepath, 'r') as fh:\n yaml_input_string = fh.read()\n\n yaml_prettified_string = prettier(yaml_input_string)\n\n if yaml_prettified_string != yaml_input_string:\n print('{} reformatted!'.format('would be' if check_only else ''))\n if not check_only:\n with open(filepath, 'w') as fh:\n fh.write(yaml_prettified_string)\n return False\n\n print('looking good!')\n return True\n\n\ndef prettier_main():\n '''Main function doing user argument parsing and calling necessary functions.\n '''\n\n parser = argparse.ArgumentParser(\n description=(\n 'Check and/or format the Komodo configuration files. '\n 'Takes in any number of yml files, which could be e.g. the main '\n 'Komodo repository and an arbitrary number of releases. '\n 'Throws a hard error if the same package is defined multiple times.'\n )\n )\n parser.add_argument(\n 'files',\n type=lambda arg: arg if os.path.isfile(arg) \\\n else parser.error('{} is not a file'.format(arg)),\n nargs='+',\n help='One or more files to format/check',\n )\n parser.add_argument(\n '--check',\n action='store_true',\n help=(\n 'Do not write the files back, just return the status. '\n 'Return code 0 means nothing would change. '\n 'Return code 1 means some files would be reformatted.'\n ),\n )\n\n args = parser.parse_args()\n\n sys.exit(0) if all(\n [prettified_yaml(filename, args.check) for filename in args.files]\n ) or not args.check else sys.exit(1)\n\n\nif __name__ == '__main__':\n prettier_main()\n", "sub_path": "komodo/prettier.py", "file_name": "prettier.py", "file_ext": "py", "file_size_in_byte": 5009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 48, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml.YAML", "line_number": 62, "usage_type": "call"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 62, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 70, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 70, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml.comments.CommentedMap", "line_number": 77, "usage_type": "call"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 77, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 77, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 81, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 81, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.StringIO", "line_number": 83, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 90, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 151, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "227350987", "text": "import matplotlib.pyplot as plt\nimport numpy as 
np\nfrom sklearn import neighbors\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\niris = load_iris()\nX = iris.data[:, :2] # Choosing only the first two input-features\ny = iris.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n\n# Visualizing the training data\nX_class0 = np.asmatrix(\n [X_train[i] for i in range(len(X_train)) if y_train[i] == 0]) # Picking only the first two classes\nY_class0 = np.zeros((X_class0.shape[0]), dtype=np.int)\nX_class1 = np.asmatrix([X_train[i] for i in range(len(X_train)) if y_train[i] == 1])\nY_class1 = np.ones((X_class1.shape[0]), dtype=np.int)\nX_class2 = np.asmatrix([X_train[i] for i in range(len(X_train)) if y_train[i] == 2])\nY_class2 = np.full((X_class2.shape[0]), fill_value=2, dtype=np.int)\n\nX_class0 = np.array(X_class0)\nX_class1 = np.array(X_class1)\nX_class2 = np.array(X_class2)\nplt.scatter(X_class0[:, 0], X_class0[:, 1], color='red')\nplt.scatter(X_class1[:, 0], X_class1[:, 1], color='blue')\nplt.scatter(X_class2[:, 0], X_class2[:, 1], color='green')\nplt.xlabel('sepal length')\nplt.ylabel('sepal width')\nplt.legend(['class 0', 'class 1', 'class 2'])\nplt.title('Fig 3: Visualization of training data')\nplt.show()\n\nmodel = neighbors.KNeighborsClassifier(n_neighbors=5) # K = 5\nmodel.fit(X_train, y_train)\n\nquery_point = np.array([5.9, 2.9])\ntrue_class_of_query_point = 1\npredicted_class_for_query_point = model.predict([query_point])\nprint(\"Query point: {}\".format(query_point))\nprint(\"True class of query point: {}\".format(true_class_of_query_point))\n\nneighbors_object = neighbors.NearestNeighbors(n_neighbors=5)\nneighbors_object.fit(X_train)\ndistances_of_nearest_neighbors, indices_of_nearest_neighbors_of_query_point = neighbors_object.kneighbors([query_point])\nnearest_neighbors_of_query_point = X_train[indices_of_nearest_neighbors_of_query_point[0]]\nprint(\"The query point is: {}\\n\".format(query_point))\nprint(\"The nearest neighbors of the query point are:\\n {}\\n\".format(nearest_neighbors_of_query_point))\nprint(\"The classes of the nearest neighbors are: {}\\n\".format(y_train[indices_of_nearest_neighbors_of_query_point[0]]))\nprint(\"Predicted class for query point: {}\".format(predicted_class_for_query_point[0]))\n\nplt.scatter(X_class0[:, 0], X_class0[:, 1], color='red')\nplt.scatter(X_class1[:, 0], X_class1[:, 1], color='blue')\nplt.scatter(X_class2[:, 0], X_class2[:, 1], color='green')\nplt.scatter(query_point[0], query_point[1], marker='^', s=75, color='black')\nplt.scatter(nearest_neighbors_of_query_point[:, 0], nearest_neighbors_of_query_point[:, 1], marker='s', s=150,\n color='yellow', alpha=0.30)\nplt.xlabel('sepal length')\nplt.ylabel('sepal width')\nplt.legend(['class 0', 'class 1', 'class 2'])\nplt.title('Fig 3: Working of the K-NN classification algorithm')\nplt.show()\n\ntest_set_predictions = [model.predict(X_test[i].reshape((1, len(X_test[i]))))[0] for i in range(X_test.shape[0])]\ntest_misclassification_percentage = 0\nfor i in range(len(test_set_predictions)):\n if test_set_predictions[i] != y_test[i]:\n test_misclassification_percentage += 1\ntest_misclassification_percentage *= 100 / len(y_test)\n\nprint(\"Evaluating K-NN classifier:\")\nprint('test misclassification percentage = {}%'.format(test_misclassification_percentage))\n", "sub_path": "KNearestNeighbours.py", "file_name": "KNearestNeighbours.py", "file_ext": "py", "file_size_in_byte": 3360, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.asmatrix", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.asmatrix", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.asmatrix", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "141477682", "text": "import requests\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nimport os\n\nclass VirustotalSubdomainFinder:\n def grab_virustotal_subdomains_for_domain(domain):\n\n try:\n key = os.environ['VIRUSTOTAL_API_KEY']\n api_url = f'https://www.virustotal.com/vtapi/v2/domain/report?apikey={key}&domain={domain}'\n api_response = requests.get(api_url, verify=False)\n subdomains = api_response.json()['subdomains']\n return subdomains\n except Exception as e:\n print(e)\n", "sub_path": "assetmon/managers/tools/virustotal.py", "file_name": "virustotal.py", "file_ext": "py", "file_size_in_byte": 571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 3, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "366305585", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nif __name__ == \"__main__\":\n\tfrom models.accountLinkedin import *\n\tfrom models.accountCompany import *\n\tfrom utils.utils import *\n\tfrom seleniumClass.managerSelenium import SeleniumManager\n\tfrom seleniumClass.seleniumClientLinkedin import ClientLinkedin\n\tfrom settings.settingsLinkedin import *\nelse:\n\tfrom .models.accountLinkedin import *\n\tfrom .models.accountCompany import *\n\tfrom .utils.utils import *\n\tfrom .seleniumClass.managerSelenium import SeleniumManager\n\tfrom .seleniumClass.seleniumClientLinkedin import ClientLinkedin\n\tfrom .settings.settingsLinkedin import *\n\nfrom datetime import datetime\nimport sys\nimport argparse\nimport time\nimport os\nimport bs4\nimport platform\n\ndef standardUrl(url, company=False):\n\n\tif company:\n\t\ttab = url.split(\"linkedin.com\")\n\t\treturn \"https://www.linkedin.com\"+tab[1]\n\telse:\n\t\tif not \"linkedin.com/in\" in url:\n\t\t\treturn None\n\n\t\ttab = url.split(\"linkedin.com/in\")\n\n\t\t\"\"\" we ignore the string after the /name/ \"\"\"\n\t\ttab2 = tab[1].split(\"/\")\n\n\t\treturn \"https://www.linkedin.com/in/\"+tab2[1]\n\n#class permettant d'effectuer les recherches de personnes / scrapping d'information via Selenium sur Linkedin\nclass SearcherLinkedin:\n\n\tdef __init__(self, manager):\n\t\tself.manager = manager\n\t\tliclient = ClientLinkedin(self.manager.driver, search_keys)\n\t\tself.manager.connection(liclient)\n\n\t\"\"\" Effectuer le scrapping sur une page de 
\t\"\"\" Scrape a Linkedin search results page, collecting the links, and move on to the next page when possible\n\t\tUsed by the findLinkedinsKeyWord(self, keywords) method\n\t\"\"\"\n\tdef findLinkedinsScrapping(self):\n\n\t\t#Let the page load /!\\ \n\t\ttime.sleep(2)\n\t\t\n\t\t#Scroll so the page is fully loaded for the scraping (otherwise we risk missing elements)\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight/2);\")\n\t\ttime.sleep(1)\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\ttime.sleep(1)\n\n\t\thtml=self.manager.driver.page_source\n\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\")\n\n\t\tsame=soup.find_all('a', class_='search-result__result-link')\n\n\t\tliste = []\n\t\tfor elem in same:\n\t\t\tliste.append('https://www.linkedin.com'+elem.get('href'))\n\n\t\tnext_page = soup.find_all('ol', class_='results-paginator')\n\t\tfor elem in next_page:\n\t\t\tsuivant=elem.find_all('button', class_='next')\n\t\t\tif (len(suivant)==1):\n\t\t\t\t#in case it only thinks it found a button ... yeah, that happens when the connection is too slow\n\t\t\t\ttry:\n\t\t\t\t\tself.manager.driver.find_element_by_css_selector('button.next').click()\n\t\t\t\t\tliste = liste + self.findLinkedinsScrapping()\n\t\t\t\texcept:\n\t\t\t\t\tbreak\n\t\treturn set(liste)\n\n\t\"\"\" runs a search with already-composed keywords, for example: frank candido president \"\"\"\n\tdef findLinkedinsKeyWord(self, keywords):\n\t\t\"\"\" searches with the keywords, replacing spaces with %20 so they work in the url \"\"\"\n\t\tkey=\"keywords=\"\n\t\tkeywords=keywords.strip()\n\t\tprofile_link=\"https://www.linkedin.com/search/results/people/?%s%s\" % (key, keywords.replace(' ','%20'))\n\n\t\tself.manager.get(profile_link, 3)\n\t\treturn self.findLinkedinsScrapping()\n\n\t\"\"\" runs a search with keywords given in a list, for example liste={frank, candido, president} \"\"\"\n\tdef findLinkedinsByKeywordsByList(self, liste):\n\t\tstr_keywords = \"\"\n\t\tfor val in liste:\n\t\t\tstr_keywords = str_keywords + val + \" \"\n\t\treturn self.findLinkedinsKeyWord(str_keywords)\n\n\t\"\"\" runs a linkedin search with precise information; school (ecole) and company (entreprise) are optional \"\"\"\n\tdef findLinkedins(self, nom, prenom, ecole=None, entreprise=None):\n\t\t\"\"\"\n\t\t\tUsage:\n\t\t\tecole=\"str\", entreprise=\"str\" are optional parameters\n\t\t\"\"\"\n\t\trecherche_nom= \"lastName=\"\n\t\trecherche_prenom = \"firstName=\"\n\t\tprofile_link=\"https://www.linkedin.com/search/results/people/?\"+recherche_nom+nom+\"&\"+recherche_prenom+prenom\n\n\t\tif ecole is not None:\n\t\t\trecherche_ecole=\"school=%s\" % ecole\n\t\t\tprofile_link+= \"&\"+recherche_ecole\n\t\tif entreprise is not None:\n\t\t\trecherche_entreprise=\"company=%s\" % entreprise\n\t\t\tprofile_link+= \"&\"+recherche_entreprise\n\n\n\t\tself.manager.get(profile_link, 3)\n\n\t\treturn self.findLinkedinsScrapping()\n\n\tdef findLinkedin(self, nom, prenom, url, file_tmp):\n\n\t\tcompte = CompteLinkedin(nom, prenom, url)\n\n\t\t\"\"\" pause 0 because we need to scroll down before pausing \"\"\"\n\t\tself.manager.get(url, 0)\n\t\t\n\t\t#let the top of the page load\n\t\ttime.sleep(3)\n\t\t#scroll down so the interests section gets loaded\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t# let the bottom of the page load\n\t\ttime.sleep(3)\n\n\t\thtml=self.manager.driver.page_source\n\n\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\") #specify parser or it will auto-select for you\n\n\t\t#Education\n\t\tvaleurs = soup.find_all('section', class_='education-section')\n\t\tif file_tmp != \"\":\n\t\t\tfile_tmp.write(\"-------------------------Education/Etude-------------------------\\n\")\n\t\tif(len(valeurs)==0):\n\t\t\tif file_tmp != \"\":\n\t\t\t\tfile_tmp.write('Empty\\n')\n\t\telse:\n\t\t\tres=\"\"\n\t\t\tfor elem in valeurs:\n\t\t\t\telem_valeurs = elem.find_all('li')\n\t\t\t\tfor e in elem_valeurs:\n\t\t\t\t\tif(e.get_text() != '') :\n\t\t\t\t\t\ttmp = formater(e.get_text())\n\t\t\t\t\t\tcompte.addEtude(d(tmp))\n\t\t\t\t\t\tres = res + '\\n\\n' + tmp\n\t\t\tif file_tmp != \"\":\n\t\t\t\tecriturePython2_Python3(file_tmp, res)\n\t\t\t\tfile_tmp.write('\\n\\n')\n\n\t\t#Favorites\n\t\tif file_tmp != \"\":\n\t\t\tfile_tmp.write(\"\\n-------------------------Favoris-------------------------\\n\")\n\t\tvaleurs = soup.find_all('li', class_='pv-interest-entity')\n\t\tfor elem in valeurs:\n\t\t\tif(elem.get_text()!= ''):\n\t\t\t\ttmp = formater(elem.get_text())\n\t\t\t\tcompte.addFavori(d(tmp))\n\t\t\t\tif file_tmp != \"\":\n\t\t\t\t\tecriturePython2_Python3(file_tmp, tmp)\n\t\t\t\t\tfile_tmp.write('\\n\\n')\n\n\t\t# Raw capture of the experiences\n\t\texperiences = []\n\t\tvaleurs = soup.find_all('section', class_='experience-section')\n\n\t\tif file_tmp != \"\":\n\t\t\tfile_tmp.write(\"\\n-------------------------Experiences-------------------------\\n\")\n\t\tif(len(valeurs)==0):\n\t\t\tif file_tmp != \"\":\n\t\t\t\tfile_tmp.write('Empty\\n')\n\t\telse:\n\t\t\t\"\"\"from a soup array we collect the list of li tags, formatted for display\n\t\t\tThis logic is used for both the education and the experience parts\"\"\"\n\t\t\tres=\"\"\n\t\t\tfor elem in valeurs:\n\t\t\t\telem_valeurs = elem.find_all('li')\n\t\t\t\tfor e in elem_valeurs:\n\t\t\t\t\tif(e.get_text() != '') :\n\t\t\t\t\t\ttmp = formater(e.get_text())\n\t\t\t\t\t\texperiences.append(d(tmp))\n\t\t\t\t\t\tres = res + '\\n\\n' + tmp\n\t\t\tif file_tmp != \"\":\n\t\t\t\tecriturePython2_Python3(file_tmp, res)\n\t\t\t\tfile_tmp.write('\\n\\n')\n\n\t\t#Collect the company logos and the matching company urls\n\t\turlsExperiences = []\n\t\tsoupImageEntreprise = soup.select('.pv-profile-section.experience-section.ember-view a')\n\t\tfor elem in soupImageEntreprise:\n\t\t\tif elem.get_text() != '':\n\n\t\t\t\t#By default address url empty\n\t\t\t\tif \"company\" in elem.get('href'):\n\t\t\t\t\turlsExperiences.append(\"https://www.linkedin.com\"+elem.get('href'))\n\t\t\t\telse:\n\t\t\t\t\turlsExperiences.append(\"\")\n\n\t\t#Walk through the raw experiences and their linked company urls\n\t\tnumExp = 0\n\t\tfor experience in experiences:\n\n\t\t\t#Default variables\n\t\t\tnom = \"\"\n\t\t\tnomE = \"\"\n\t\t\tdate = \"\"\n\t\t\tlocation = \"\"\n\t\t\tdescription = \"\"\n\t\t\tactif = True\n\t\t\tdomaine = \"\"\n\t\t\tdescriptionE = \"\"\n\n\t\t\tstr_tab=experience.split(\"\\n\")\n\t\t\tligne = 0\n\t\t\tfor strExp in str_tab :\n\t\t\t\tstrDecode = d(strExp)\n\t\t\t\tstrExpLow = strDecode.lower()\n\t\t\t\tstrExpLow_tab=strExpLow.split(\" \")\n\n\t\t\t\t#print(strDecode)\n\t\t\t\t#Initialization of the experience job (first line not empty)\n\t\t\t\tif ligne == 0:\n\t\t\t\t\tnom = strDecode\n\n\t\t\t\t#Initialization of the date\n\t\t\t\tif \"dates\" == strExpLow_tab[0]:\n\t\t\t\t\tdate = strDecode[16:]\n\t\t\t\t\tactif = False\n\t\t\t\t\tfor var in [\"aujourd\", \"present\", \"now\", \"today\"]:\n\t\t\t\t\t\tif var in strExpLow:\n\t\t\t\t\t\t\tactif = True\n\t\n\t\t\t\t#Initialization of the company name\n\t\t\t\tif \"company\" == strExpLow_tab[0]:\n\t\t\t\t\tnomE = strDecode[13:]\n\n\t\t\t\t#Initialization of the geolocation\n\t\t\t\tif \"location\" == strExpLow_tab[0]:\n\t\t\t\t\tlocation = strDecode.replace(\"Lieu \", \"\")\n\n\t\t\t\t#Initialization of the description\n\t\t\t\tif (ligne>0) & (not strExpLow_tab[0] in ['duree', 'dates', 'nom', 'lieu']):\n\t\t\t\t\tdescription = strDecode\n\t\t\t\t\n\t\t\t\t# ++ if the line is not empty\n\t\t\t\tif not strExp==\"\":\n\t\t\t\t\tligne = ligne + 1\n\n\t\t\t#If a company logo exists.\n\t\t\tif not urlsExperiences[numExp] == \"\":\n\t\t\t\t# wait for page load=3\n\t\t\t\tself.manager.get(urlsExperiences[numExp], 3)\n\t\t\t\thtml=self.manager.driver.page_source\n\t\t\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\") #specify parser or it will auto-select for you\n\t\t\t\tdivnom = soup.select('.org-top-card-module__name')\n\t\t\t\tdivdomaine = soup.select('.company-industries.org-top-card-module__dot-separated-list')\n\t\t\t\tdivlocation = soup.select('.org-top-card-module__location')\n\t\t\t\tdivdescription = soup.select('.org-about-us-organization-description p')\n\n\t\t\t\t#Prefer the name and location given on the company page when it exists.\n\t\t\t\tfor elem in divnom:\n\t\t\t\t\tnomE = d(elem.get_text().strip(\"\\n \\r\"))\n\t\t\t\tfor elem in divlocation:\n\t\t\t\t\tlocation = d(elem.get_text().strip(\"\\n \\r\"))\n\t\t\t\tfor elem in divdomaine:\n\t\t\t\t\tdomaine = d(elem.get_text().strip(\"\\n \\r\"))\n\t\t\t\tfor elem in divdescription:\n\t\t\t\t\tdescriptionE = d(elem.get_text().strip(\"\\n \\r\"))\n\n\t\t\tcompte.addExperience(nom, date, location, description, actif, urlsExperiences[numExp], nomE, descriptionE, domaine)\n\n\t\t\t#++\n\t\t\tnumExp = numExp + 1\n\n\t\treturn compte\n\n\tdef findLinkedinCompany(self, link):\n\t\tself.manager.get(link, 0)\n\t\t#wait top page loading\n\t\ttime.sleep(3)\n\t\t#scroll down\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t# wait bottom page loading\n\t\ttime.sleep(3)\n\t\taccountCompanyLinkedin = AccountCompany(\"\",link)\n\t\thtml=self.manager.driver.page_source\n\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\") #specify parser or it will auto-select for you\n\t\tdivnom = soup.select('.org-top-card-module__name')\n\t\tdivdomaine = soup.select('.company-industries.org-top-card-module__dot-separated-list')\n\t\tdivlocation = soup.select('.org-top-card-module__location')\n\t\tdivdescription = soup.select('.org-about-us-organization-description p')\n\n\t\t#Prefer the name and location given on the company page when it exists.\n\t\tfor elem in divnom:\n\t\t\taccountCompanyLinkedin.nomComplet = d(elem.get_text().strip(\"\\n \\r\"))\n\t\tfor elem in divlocation:\n\t\t\taccountCompanyLinkedin.position = d(elem.get_text().strip(\"\\n \\r\"))\n\t\tfor elem in divdomaine:\n\t\t\taccountCompanyLinkedin.domaine = d(elem.get_text().strip(\"\\n \\r\"))\n\t\tfor elem in divdescription:\n\t\t\taccountCompanyLinkedin.description = d(elem.get_text().strip(\"\\n \\r\"))\n\n\t\treturn accountCompanyLinkedin\n\nif __name__ == '__main__':\n\tmanager = SeleniumManager(3)\n\tsearch = SearcherLinkedin(manager)\n\tliste = search.findLinkedinsKeyWord(\"frank candido president\")\n\tliste = list(liste)\n\t#liste = search.findLinkedins(\"candido\", \"frank\", entreprise=\"nuran\")\n\t#test for the multi-page case = number of results = 13\n\t#liste = search.findLinkedins(\"Legros\", \"camille\")\n\n\tfile_tmp = \"\"\n\tname_date_file = datetime.now().strftime('%H%M%d%m%Y')\n\tif sys.version_info >= (3, 0):\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_recherche'+name_date_file+'.log', 'w+', encoding=\"utf8\")\n\telse:\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_recherche'+name_date_file+'.log', 'w+')\n\tfor val in liste:\n\t\tprint(val)\n\t\tecriturePython2_Python3(file_tmp, val)\n\t\tfile_tmp.write('\\n')\n\tfile_tmp.close()\n\n\tfile_tmp = \"\"\n\tif sys.version_info >= (3, 0):\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_info'+name_date_file+'.log', 'w+', encoding=\"utf8\")\n\telse:\n\t\t#python2 case: the utf8 encoding has to be applied when writing to the file via str.encode(utf8) (which does not work in python3, where it is unnecessary anyway)\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_info'+name_date_file+'.log', 'w+')\n\n\tif len(liste) > 0 :\n\t\tcompte = search.findLinkedin(\"candido\", \"frank\", liste[0], file_tmp)\n\t\tcompte.homonymes = liste[1:]\n\t\tfor experience in compte.experiences:\n\t\t\tif platform.system() == \"Windows\":\n\t\t\t\tfile_tmp.write('\\n\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"date:\"+experience.date+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"description:\"+experience.description+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"urlEntreprise:\"+experience.urlEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"nomExperience:\"+experience.nomExperience+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"nomEntreprise:\"+experience.nomEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"geolocalisation:\"+experience.geolocalisation+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"descriptionE:\"+experience.descriptionEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"domaine:\"+experience.domaineEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"expActif? 
%s \\n\" % experience.actif)\n\n\t\t\telse:\n\t\t\t\tprint(\"date:\",experience.date)\n\t\t\t\tprint(\"description\", experience.description)\n\t\t\t\tprint(\"urlEntreprise\", experience.urlEntreprise)\n\t\t\t\tprint(\"nomExperience\", experience.nomExperience)\n\t\t\t\tprint(\"nomEntreprise\", experience.nomEntreprise)\n\t\t\t\tprint(\"geolocalisation\", experience.geolocalisation)\n\t\t\t\tprint(\"descriptionE\", experience.descriptionEntreprise)\n\t\t\t\tprint(\"domaine\", experience.domaineEntreprise)\n\t\t\t\tprint(\"expActif?\", experience.actif)\n\tfile_tmp.close()\n\tmanager.driver_quit()\n", "sub_path": "project/libraries/SNScrapping/sLinkedin.py", "file_name": "sLinkedin.py", "file_ext": "py", "file_size_in_byte": 13467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "seleniumClass.seleniumClientLinkedin.ClientLinkedin", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 145, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 270, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 296, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 300, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 303, "usage_type": "call"}, {"api_name": "seleniumClass.managerSelenium.SeleniumManager", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 331, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 331, "usage_type": "name"}, {"api_name": "sys.version_info", "line_number": 332, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 343, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 353, "usage_type": "call"}]} +{"seq_id": "68218009", "text": "__author__ = 'ecrisostomo'\n\nimport binascii\nimport hmac\nimport hashlib\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\nfrom stormpath.util import is_default_port, encode_url, str_query_string\n\nclass Sauthc1Signer:\n\n HOST_HEADER = \"Host\"\n AUTHORIZATION_HEADER = \"Authorization\"\n STORMPATH_DATE_HEADER = \"X-Stormpath-Date\"\n ID_TERMINATOR = \"sauthc1_request\"\n ALGORITHM = \"HMAC-SHA-256\"\n AUTHENTICATION_SCHEME = \"SAuthc1\"\n SAUTHC1_ID = \"sauthc1Id\"\n SAUTHC1_SIGNED_HEADERS = \"sauthc1SignedHeaders\"\n SAUTHC1_SIGNATURE = \"sauthc1Signature\"\n DATE_FORMAT = \"%Y%m%d\"\n TIMESTAMP_FORMAT = \"%Y%m%dT%H%M%SZ\"\n NL = \"\\n\"\n\n def sign_request(self, request, api_key):\n\n time = datetime.utcnow()\n time_stamp = time.strftime(self.TIMESTAMP_FORMAT)\n date_stamp = time.strftime(self.DATE_FORMAT)\n\n nonce = str(uuid4())\n\n parsed_url = urlparse(request.href)\n\n # SAuthc1 requires that we sign the Host header so we\n # have to have it in the request by the time we sign.\n host_header = parsed_url.hostname\n\n if not is_default_port(parsed_url):\n host_header = parsed_url.netloc\n\n request.http_headers[self.HOST_HEADER] = host_header\n\n 
request.http_headers[self.STORMPATH_DATE_HEADER] = time_stamp\n\n method = request.http_method\n canonical_resource_path = self._canonicalize_resource_path_(parsed_url.path)\n canonical_query_string = self._canonicalize_query_string_(request)\n canonical_headers_string = self._canonicalize_headers_(request)\n signed_headers_string = self._get_signed_headers_(request)\n request_payload_hash_hex = self._hash_hex_(self._get_request_payload_(request))\n\n canonical_request = ''.join((method, self.NL,\n canonical_resource_path, self.NL,\n canonical_query_string, self.NL,\n canonical_headers_string, self.NL,\n signed_headers_string, self.NL,\n request_payload_hash_hex))\n\n id = ''.join((api_key.id, \"/\", date_stamp, \"/\", nonce, \"/\", self.ID_TERMINATOR))\n\n canonical_request_hash_hex = self._hash_hex_(canonical_request)\n\n string_to_sign = ''.join((self.ALGORITHM, self.NL,\n time_stamp, self.NL,\n id, self.NL,\n canonical_request_hash_hex))\n\n # SAuthc1 uses a series of derived keys, formed by hashing different pieces of data\n k_secret = ''.join((self.AUTHENTICATION_SCHEME, api_key.secret))\n k_date = self._sign_(date_stamp, k_secret)\n k_nonce = self._sign_(nonce, k_date)\n k_signing = self._sign_(self.ID_TERMINATOR, k_nonce)\n\n signature = self._sign_(string_to_sign, k_signing)\n signature_hex = binascii.hexlify(signature).decode()\n\n authorization_header = ''.join((self.AUTHENTICATION_SCHEME, \" \",\n self._create_name_value_pair_(self.SAUTHC1_ID, id), \", \",\n self._create_name_value_pair_(self.SAUTHC1_SIGNED_HEADERS, signed_headers_string), \", \",\n self._create_name_value_pair_(self.SAUTHC1_SIGNATURE, signature_hex)))\n\n request.http_headers[self.AUTHORIZATION_HEADER] = authorization_header\n\n def _create_name_value_pair_(self, name, value):\n return ''.join((name, '=', value))\n\n def _sign_(self, data, key):\n\n try:\n byte_key = key.encode()\n except:\n byte_key = key\n\n return hmac.new(byte_key, data.encode(), hashlib.sha256).digest()\n\n def _hash_hex_(self, text):\n return hashlib.sha256(text.encode()).hexdigest()\n\n def _get_request_payload_(self, request):\n return self._get_request_payload_without_query_params_(request)\n\n def _get_request_payload_without_query_params_(self, request):\n\n result = ''\n\n if request.body:\n result = request.body\n\n return result\n\n def _get_signed_headers_(self, request):\n\n sorted_headers = OrderedDict(sorted(request.http_headers.items()))\n\n result = ''\n\n for header in sorted_headers.copy().keys():\n\n if result:\n result += ';' + header\n else:\n result += header\n\n return result.lower()\n\n def _canonicalize_headers_(self, request):\n\n sorted_headers = OrderedDict(sorted(request.http_headers.items()))\n\n result = ''\n\n for key, value in sorted_headers.items():\n\n result += ''.join((str(key).lower(), ':', value))\n result += self.NL\n\n return result\n\n def _canonicalize_query_string_(self, request):\n return str_query_string(request.query_string)\n\n def _canonicalize_resource_path_(self, resource_path):\n\n if resource_path:\n return encode_url(resource_path)\n else:\n return '/'\n", "sub_path": "stormpath/http/authc.py", "file_name": "authc.py", "file_ext": "py", "file_size_in_byte": 5132, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 33, "usage_type": 
"call"}, {"api_name": "urllib.parse.urlparse", "line_number": 35, "usage_type": "call"}, {"api_name": "stormpath.util.is_default_port", "line_number": 41, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 78, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 97, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 97, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 116, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 131, "usage_type": "call"}, {"api_name": "stormpath.util.str_query_string", "line_number": 143, "usage_type": "call"}, {"api_name": "stormpath.util.encode_url", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "245220064", "text": "# encoding=utf-8\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\nimport scrapy\r\nfrom getpics.items import GetpicsItem, GetTitleItem\r\nfrom scrapy import log\r\nfrom scrapy.http import Request\r\n\r\n\r\nclass MySpider(scrapy.Spider):\r\n\tname = \"kook\"\r\n\tallowed_domains = [\"8264.com\"]\r\n\tstart_urls = [\r\n\t\t\"http://www.8264.com/list/871/\"\r\n\t]\r\n\tdownload_delay = 2\r\n\r\n\r\n\tdef parse(self, response):\r\n\t\titems = []\r\n\t\tfor sel in response.xpath('//div[@class=\"bbslistone\"]'):\r\n\t\t\titem = GetTitleItem()\r\n\t\t\t# item['image_urls'] = sel.xpath('a/img/@src').extract()[0]\r\n\t\t\t# item['image_paths'] = sel.xpath('div/a/text()').extract()[0]\r\n\t\t\t# 抓取title\r\n\t\t\ttitle = sel.xpath('div[@class=\"bbslistone_name\"]/a/text()').extract()[0]\r\n\t\t\tif len(title) == 0:\r\n\t\t\t\tlog.msg(\"fecth title failed\")\r\n\t\t\t\tcontinue\r\n\t\t\titem['title'] = title\r\n\t\t\ttitle_url = sel.xpath('a/@href').extract()[0]\r\n\t\t\t# 抓取title_url\r\n\t\t\tif len(title_url) == 0:\r\n\t\t\t\tlog.msg(\"fecth title_url failed\")\r\n\t\t\t\tcontinue\r\n\t\t\titem['title_url'] = title_url\r\n\t\t\titems.append(item)\r\n\t\t\tyield Request(item['title_url'], callback=self.parse_item, meta={'item':item})\r\n\r\n\t\t# for item in items:\r\n\t\t# \tyield Request(item['title_url'], callback=self.parse_item, meta={'item':item})\r\n\t\t# \tyield Request(item['title_url'], callback=self.parse_item)\r\n\t\t# \tyield item\r\n\r\n\r\n\r\n\r\n\tdef parse_item(self, response):\r\n\t\t# 获得由parse传来的item\r\n\t\t# item = response.meta['item']\r\n\t\t# 抓取image_urls\r\n\t\t# print response.body\r\n\r\n\r\n\t\tfor sel in response.xpath('//div[@class=\"t_fsz_new \"]'):\r\n\t\t\tfor img in sel.xpath('//img[@class=\"zoom\"]'):\r\n\r\n\t\t\t\titem =GetpicsItem()\r\n\t\t\t\timg_url = img.xpath('@file').extract()\r\n\t\t\t\tif len(img_url) == 0:\r\n\t\t\t\t\tlog.msg(\"there is no pics in the topic \")\r\n\t\t\t\t\treturn\r\n\t\t\t\ttitle = img.xpath('@title').extract()\r\n\t\t\t\tif len(title) == 0:\r\n\t\t\t\t\tlog.msg(\"the pic has no title\")\r\n\t\t\t\t\treturn\r\n\t\t\t\titem['image_urls'] = img_url\r\n\t\t\t\titem['title'] = title\r\n\t\t\t\tyield item\r\n\r\n\r\n\r\n\t\t# 抓取author\r\n\t\t# author", "sub_path": "getpics/getpics/spiders/MySpider.py", "file_name": "MySpider.py", "file_ext": "py", "file_size_in_byte": 1967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 4, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 11, "usage_type": "attribute"}, {"api_name": "getpics.items.GetTitleItem", 
"line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.log.msg", "line_number": 29, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 29, "usage_type": "name"}, {"api_name": "scrapy.log.msg", "line_number": 35, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 35, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 39, "usage_type": "call"}, {"api_name": "getpics.items.GetpicsItem", "line_number": 59, "usage_type": "call"}, {"api_name": "scrapy.log.msg", "line_number": 62, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 62, "usage_type": "name"}, {"api_name": "scrapy.log.msg", "line_number": 66, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "521564570", "text": "'''\nReturns total price paid for individual rentals\n'''\nimport argparse\nimport json\nimport datetime\nimport math\nimport logging\n\nLOG_FILE = datetime.datetime.now().strftime(\"%Y-%m-%d\")+'_charges_calc.log'\nLOG_FORMAT = \"%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s\"\nFORMATTER = logging.Formatter(LOG_FORMAT)\n\nFILE_HANDLER = logging.FileHandler(LOG_FILE)\nFILE_HANDLER.setFormatter(FORMATTER)\n\nCONSOLE_HANDLER = logging.StreamHandler(LOG_FILE)\nCONSOLE_HANDLER.setFormatter(FORMATTER)\n\nLOGGER = logging.getLogger()\n\ndef parse_cmd_arguments():\n ''' Argumantes to pass '''\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='ouput JSON file', required=True)\n parser.add_argument('-d', '--debug', help='Debug level: 0:info, 1: error, 2: warn, 3: debug', default=0, required=False)\n return parser.parse_args()\n\n\ndef load_rentals_file(filename):\n ''' Loads rentals file data '''\n with open(filename) as file:\n try:\n LOGGER.info('loading JSON file %s', filename)\n return json.load(file)\n except ValueError as err:\n LOGGER.error('Failed to load JSON data: %s', err)\n exit(0)\n\n\ndef calculate_additional_fields(d_data):\n ''' Calculates additional fields '''\n LOGGER.info('Calculating additional fields')\n\n for value in d_data.values():\n try:\n rental_start = datetime.datetime.strptime(value['rental_start'], '%m/%d/%y')\n rental_end = datetime.datetime.strptime(value['rental_end'], '%m/%d/%y')\n except ValueError as err:\n LOGGER.warning(\"Failed to calculate time from %s or %s. ERROR: %s\", value['rental_start'], value['rental_end'], err)\n continue\n\n\n\n if rental_start >= rental_end:\n LOGGER.warning(\"Rental starts %s on or after ends %s in %s. 
Skip.\", rental_start, rental_end, value)\n continue\n\n try:\n value['total_days'] = (rental_end - rental_start).days\n value['total_price'] = value['total_days'] * value['price_per_day']\n value['sqrt_total_price'] = math.sqrt(value['total_price'])\n value['unit_cost'] = value['total_price'] / value['units_rented']\n except Exception as err:\n LOGGER.error('Failed Calculating additional fields: %s', err)\n exit(0)\n\n return d_data\n\ndef save_to_json(filename, data):\n ''' Save to json '''\n with open(filename, 'w') as file:\n json.dump(data, file)\n\nif __name__ == \"__main__\":\n ARGS = parse_cmd_arguments()\n DATA = load_rentals_file(ARGS.input)\n DATA1 = calculate_additional_fields(DATA)\n save_to_json(ARGS.output, DATA1)\n", "sub_path": "students/lguerrero/lesson02/assignment/src/charges_calc.py", "file_name": "charges_calc.py", "file_ext": "py", "file_size_in_byte": 2792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.now", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "json.load", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "426469045", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom args import *\nfrom model_parts import *\n\n\n'''\nModel head\n'''\nclass ModelDisigner(nn.Module):\n\tdef __init__(self):\n\t\tsuper(ModelDisigner, self).__init__()\n\t\tself.backbone = Backbone()\n\t\tself.score_branch = ScoreBranch()\n\t\tself.mask_branch = MaskBranch()\n\t\tself.up = nn.Upsample(scale_factor=2, mode='nearest')\n\t\tself.final = nn.Sequential(\n\t\t\tnn.Conv2d(1, NUM_CLASSES, kernel_size=1),\n\t\t\tnn.Sigmoid()\n\t\t\t)\n\n\tdef Correlation_func(self, s_f, t_f): # s_f-->search_feat, t_f-->target_feat\n\t\tt_f = t_f.reshape(-1, 1, t_f.size(2), t_f.size(3))\n\t\tout = s_f.reshape(1, -1, s_f.size(2), s_f.size(3)) # 1, b*ch, 32, 32\n\t\tout = F.conv2d(out, t_f, groups=t_f.size(0))\n\t\tout = out.reshape(-1, s_f.size(1), out.size(2), out.size(3))\n\t\treturn out\n\n\tdef Chiose_RoW(self, corr_feat, pos_list):\n\t\tcorr_feat = corr_feat.reshape(BATCH_SIZE, 17, 17, 256)\n\t\tj_tensors = torch.tensor([]).to(device)\n\t\tfor j in range(corr_feat.size(0)):\n\t\t\tj_tensor = corr_feat[j][pos_list[j, 0]][pos_list[j, 1]].unsqueeze(0)\n\t\t\tj_tensors = torch.cat([j_tensors, j_tensor], dim=0)\n\t\tj_tensors = j_tensors.unsqueeze(2).unsqueeze(3)\n\t\treturn j_tensors\n\n\n\tdef Choise_feat(self, feat, pos_list, x):\n\t\tfeat = feat.reshape(TIMESTEPS, BATCH_SIZE, feat.size(1), feat.size(2), feat.size(3))\n\t\tfeat = 
feat.permute(0, 1, 3, 4, 2)\n\n\t\ti_tensors = torch.tensor([]).to(device)\n\t\tfor i in range(feat.size(0)):\n\t\t\tj_tensors = torch.tensor([]).to(device)\n\t\t\tfor j in range(feat.size(1)):\n\t\t\t\tj_tensor = feat[i][j][x*pos_list[i][j][0]:x*pos_list[i][j][0]+x*16, x*pos_list[i][j][1]:x*pos_list[i][j][1]+x*16, :].unsqueeze(0)\n\t\t\t\tj_tensors = torch.cat([j_tensors, j_tensor], dim=0)\n\t\t\ti_tensor = j_tensors.unsqueeze(0)\n\t\t\ti_tensors = torch.cat([i_tensors, i_tensor], dim=0)\n\n\t\tfeat = i_tensors.permute(0, 1, 4, 2, 3)\n\t\tfeat = feat.reshape(TIMESTEPS*BATCH_SIZE, feat.size(2), feat.size(3), feat.size(4))\n\t\treturn feat\n\n\n\tdef forward(self, target, searchs):\n\t\t_, target_feat = self.backbone(target)\n\t\tsearch_cats, searchs_feat = self.backbone(searchs)\n\t\tcorr_feat = self.Correlation_func(searchs_feat, target_feat)\n\t\t##### Score Branch #####\n\t\tscore, pos_list = self.score_branch(corr_feat)\n\t\t# print(pos_list)\n\t\t##### Mask Branch #####\n\t\tmasks_feat = self.Chiose_RoW(corr_feat, pos_list)\n\t\tmask = self.mask_branch(masks_feat).reshape(BATCH_SIZE, 1, 64, 64)\n\t\tmask = self.up(mask)\n\t\tmask = self.final(mask)\n\t\treturn score, mask\n\n\nif __name__ == '__main__':\n\tmodel = ModelDisigner()\n\tmodel = model.to(device)\n\ttarget = torch.rand([BATCH_SIZE, 3, 128, 128]).to(device)\n\tsearchs = torch.rand([BATCH_SIZE, 3, 256, 256]).to(device)\n\tscore, mask = model(target, searchs)\n\tprint('score.shape: ', score.shape)\n\tprint('mask.shape: ', mask.shape)\n", "sub_path": "model_head_base.py", "file_name": "model_head_base.py", "file_ext": "py", "file_size_in_byte": 2745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Upsample", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "5323114", "text": "from collections import Counter\nimport h5py\nfrom itertools import combinations, permutations, product\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nfrom scipy.signal import decimate\nimport tensorflow as tf\n\n\nclass PairDataGeneratorRS(tf.keras.utils.Sequence):\n \"\"\"\n This class generates the pair data 
used to train/test a Siamese network,\n where train and test data come from same set of species.\n\n The pair data is restricted to come from different mice species to improve\n generality of the embedding learnt by the Siamese network.\n Pair from same classes is labeled with 0.\n Pair from different classes is labeled with 1.\n The set of sham/TBI mice must contain at least 2 unique species.\n Pair data in test set will not be in the train set.\n\n Currently, it only support num_classes = 4. Number of samples generated\n may be more than num_samples to ensure pair samples are distributed\n evenly among available pairs.\n\n Args:\n file_path: location of HDF5 files containing the EEG epochs\n file_template: template of filenamem, e.g., {}_BL5_ew32.h5\n sham_set: set of mice species with sham injury\n tbi_set: set of mice species with TBI\n purpose: purpose of generator - train or test\n batch_size: batch size\n num_classes: number of species classes\n num_samples: number of pair samples to generate\n regenerate: if True, new samples will be regenerated\n shuffle: if True, dataset are shuffled after each epoch\n decimate: decimation factor\n test_percent: percentage of pair samples used for test set\n overlap: if True, use overlapping epochs\n \"\"\"\n def __init__(self, file_path, file_template, sham_set, tbi_set,\n purpose='train', batch_size=32, num_classes=4,\n num_samples=1024, regenerate=False, shuffle=True, decimate=1,\n test_percent=20, overlap=True):\n self.file_path = file_path\n self.file_template = file_template\n self.decimate = decimate\n self.num_samples = num_samples\n assert purpose in ('train', 'test'),\\\n 'purpose must be either train or test'\n self.purpose = purpose\n assert test_percent >= 0 and test_percent <= 100,\\\n 'test_percent must be between 0 and 100'\n self.test_percent = test_percent\n\n # check that num_classes is set to 4\n assert num_classes == 4,\\\n 'Only num_classes = 4 is supported currently'\n self.num_classes = num_classes\n if num_classes == 4:\n self.stages = ['wake', 'sleep']\n self.num_class_combs = 10\n self.num_same_pairs = 4\n self.num_diff_pairs = 6\n elif num_classes == 6:\n self.stages = ['wake', 'nrem', 'rem']\n self.num_class_combs = 27\n self.num_same_pairs = 6\n self.num_diff_pairs = 21\n\n # check that sham_set and tbi_set contain at least 2 different species\n sham_set = list(Counter(sham_set))\n tbi_set = list(Counter(tbi_set))\n assert len(sham_set) > 1,\\\n \"Sham set must contain at least 2 unique species\"\n assert len(tbi_set) > 1,\\\n \"TBI set must contain at least 2 unique species\"\n self.sham_set = sham_set\n self.tbi_set = tbi_set\n\n # read from existing index file for generated samples\n # if regenerate = False; generate new index file if it does not exist\n if overlap:\n self.out_file = file_template[:-3].format('pairdata') +\\\n '_{}_{}_{}_{}.h5'.format(num_classes, batch_size, num_samples,\n test_percent)\n else:\n self.out_file = file_template[:-3].format('pairdata_novl') +\\\n '_{}_{}_{}_{}.h5'.format(num_classes, batch_size, num_samples,\n test_percent)\n if not os.path.exists(self.out_file) or regenerate:\n self._generate_labeled_pairs()\n\n # set the generator to be either train or test data generator\n num_test_samples = int(np.round(self.test_percent *\n self.num_samples / 100))\n if self.purpose == 'test':\n self.num_samples = num_test_samples\n self.df = pd.read_hdf(self.out_file, 'pair_index/test', mode='r')\n else:\n self.num_samples = num_samples - num_test_samples\n self.df = pd.read_hdf(self.out_file, 
'pair_index/train', mode='r')\n assert batch_size <= self.num_samples,\\\n 'Batch size must be <= number of (train or test) samples'\n self.batch_size = batch_size\n\n # shuffle data if shuffle=True\n self.shuffle = shuffle\n self.on_epoch_end()\n\n def __len__(self):\n return int(np.ceil(self.num_samples / self.batch_size))\n\n def __getitem__(self, index):\n # set index range\n # max index is num_samples - 1 for last batch\n min_index = index * self.batch_size\n if (index + 1) * self.batch_size < self.num_samples:\n max_index = (index + 1) * self.batch_size - 1\n else:\n max_index = self.num_samples - 1\n # generate the batch\n data0 = []\n data1 = []\n labels = []\n for pidx in self.indexes[min_index:max_index + 1]:\n df = self.df\n epoch0, epoch1 = self.pair_data_from_hdf5(df.at[pidx, 'species0'],\n df.at[pidx, 'species1'],\n df.at[pidx, 'stage0'],\n df.at[pidx, 'stage1'],\n df.at[pidx, 'index0'],\n df.at[pidx, 'index1'])\n if self.decimate > 1:\n epoch0 = decimate(epoch0, self.decimate)\n epoch1 = decimate(epoch1, self.decimate)\n data0.append(epoch0)\n data1.append(epoch1)\n labels.append(df.at[pidx, 'label'])\n # convert datasets to numpy arrays\n shape = (len(data0), len(data0[0]), 1)\n data0 = np.array(data0).reshape(shape)\n data1 = np.array(data1).reshape(shape)\n labels = np.array(labels, dtype=int)\n\n return [data0, data1], labels\n\n def get_labels(self):\n labels = self.df['label'].tolist()[0:self.num_samples]\n return np.array(labels, dtype=int)\n\n def get_num_samples(self, species, stage):\n datafile = os.path.join(self.file_path,\n self.file_template.format(species))\n with h5py.File(datafile, 'r') as datafile:\n num_epoch_samples = datafile['eeg'][stage].shape[0]\n return num_epoch_samples\n\n def on_epoch_end(self):\n self.indexes = [i for i in range(self.num_samples)]\n if self.shuffle:\n np.random.shuffle(self.indexes)\n\n def pair_data_from_hdf5(self, species0, species1, stage0, stage1, idx0,\n idx1):\n file0 = os.path.join(self.file_path,\n self.file_template.format(species0))\n file1 = os.path.join(self.file_path,\n self.file_template.format(species1))\n data0 = []\n data1 = []\n with h5py.File(file0, 'r') as file0, h5py.File(file1, 'r') as file1:\n data0 = file0['eeg'][stage0][idx0]\n data1 = file1['eeg'][stage1][idx1]\n return data0, data1\n\n def _generate_labeled_pairs(self):\n if os.path.exists(self.out_file):\n os.remove(self.out_file)\n curr_train_index = 0\n curr_test_index = 0\n store = pd.HDFStore(self.out_file, mode='a', complevel=4,\n complib='zlib')\n for type in ['Sham', 'TBI', 'Both']:\n if type == 'Both':\n species_combs = list(product(self.sham_set, self.tbi_set))\n # div_factor is set to 1 since each pair of stages account\n # for one stage combination\n div_factor = 1\n else:\n if type == 'Sham':\n species_set = self.sham_set\n elif type == 'TBI':\n species_set = self.tbi_set\n species_combs = list(combinations(species_set, 2))\n # div_factor is set to 2 for 2 pairs of different stages\n # account for same stage combination in one set case\n div_factor = 2\n stage_perms = list(product(self.stages, self.stages))\n for species0, species1 in species_combs:\n for stage0, stage1 in stage_perms:\n num_epoch_samples0 = self.get_num_samples(species0, stage0)\n num_epoch_samples1 = self.get_num_samples(species1, stage1)\n if type == 'Both' or stage0 != stage1:\n num_pair_samples = int(np.ceil(self.num_samples / 2 /\n self.num_diff_pairs /\n div_factor /\n len(species_combs)))\n label = 1\n else:\n num_pair_samples = int(np.ceil(self.num_samples / 2 /\n 
self.num_same_pairs /\n len(species_combs)))\n label = 0\n temp_count = int(np.ceil(np.sqrt(num_pair_samples)))\n index0 = random.sample(list(range(num_epoch_samples0)),\n temp_count)\n index1 = random.sample(list(range(num_epoch_samples1)),\n temp_count)\n index_pair = random.sample(list(product(index0, index1)),\n num_pair_samples)\n index_pair = [list(t) for t in zip(*index_pair)]\n num_test_pair_samples = int(np.round(self.test_percent *\n num_pair_samples /\n 100))\n num_train_pair_samples = num_pair_samples -\\\n num_test_pair_samples\n df_train_index = list(range(curr_train_index,\n curr_train_index +\n num_train_pair_samples))\n df_test_index = list(range(curr_test_index,\n curr_test_index +\n num_test_pair_samples))\n index0 = index_pair[0][:num_train_pair_samples]\n index1 = index_pair[1][:num_train_pair_samples]\n store.append('pair_index/train',\n pd.DataFrame({'species0': species0,\n 'species1': species1,\n 'stage0': stage0,\n 'stage1': stage1,\n 'index0': index0,\n 'index1': index1,\n 'label': label},\n index=df_train_index),\n data_columns=True,\n min_itemsize={'species0': 7,\n 'species1': 7,\n 'stage0': 5,\n 'stage1': 5})\n curr_train_index += num_train_pair_samples\n index0 = index_pair[0][num_train_pair_samples:]\n index1 = index_pair[1][num_train_pair_samples:]\n store.append('pair_index/test',\n pd.DataFrame({'species0': species0,\n 'species1': species1,\n 'stage0': stage0,\n 'stage1': stage1,\n 'index0': index0,\n 'index1': index1,\n 'label': label},\n index=df_test_index),\n data_columns=True,\n min_itemsize={'species0': 7,\n 'species1': 7,\n 'stage0': 5,\n 'stage1': 5})\n curr_test_index += num_test_pair_samples\n store.close()\n", "sub_path": "pairdatageneratorrs.py", "file_name": "pairdatageneratorrs.py", "file_ext": "py", "file_size_in_byte": 13018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.keras", "line_number": 12, "usage_type": "attribute"}, {"api_name": "scipy.signal.decimate", "line_number": 49, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.read_hdf", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.read_hdf", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 114, "usage_type": "call"}, {"api_name": "scipy.signal.decimate", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.signal.decimate", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 164, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, 
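A minimal usage sketch for the PairDataGeneratorRS record above (the file_path, species names and counts here are hypothetical; assumes HDF5 epoch files matching file_template exist under file_path):

    gen = PairDataGeneratorRS(file_path='data', file_template='{}_BL5_ew32.h5',
                              sham_set=['Sham1', 'Sham2'], tbi_set=['TBI1', 'TBI2'],
                              purpose='train', batch_size=32, num_samples=1024)
    [data0, data1], labels = gen[0]  # two (batch, epoch_len, 1) arrays; label 0 = same class, 1 = different class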
{"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.HDFStore", "line_number": 184, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 188, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 197, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 217, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 218, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 220, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 222, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 239, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "334692324", "text": "from flask import Flask, request\nimport redis\nimport json\nfrom mirrulations.docs_filter import process_docs\nfrom mirrulations.doc_filter import process_doc\nfrom mirrulations.redis_manager import RedisManager\nimport logging\nimport io\n\n\nFORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'\nlogging.basicConfig(filename='endpoints_log.log', format=FORMAT)\nd = {'clientip': '192.168.0.1', 'user': 'FLASK'}\nlogger = logging.getLogger('tcpserver')\n\napp = Flask(__name__)\n\nversion = 'v1.3'\n\n\ndef redis_server():\n return RedisManager(redis.Redis())\n\n\n@app.route('/')\ndef default():\n \"\"\"\n Default endpoint\n :return: Returns empty json\n \"\"\"\n logger.debug('Successful API Call: %s', 'default: default endpoint', extra=d)\n return json.dumps({})\n\n\n@app.route('/get_work')\ndef get_work():\n \"\"\"\n Endpoint the user will use to get work from the queue\n client_id will be one of the parameters given for logging purposes\n :return: Returns the json containing the job_id, the type of work to be done, the work that nees to be done, and\n the version number\n \"\"\"\n logging.warning(\"Successful API Call: %s\", 'get_work: get_work', extra=d)\n logger.info('Calling API to get work...')\n if len(request.args) != 1:\n logger.debug('Exception: %s', 'get_work: Get Exception for incorrect number of parameters', extra=d)\n logger.error('Error - number of parameters incorrect')\n return 'Parameter Missing', 400\n logger.debug('Assign Variable: %s', 'get_work: attempting to get client_id', extra=d)\n client_id = request.args.get('client_id')\n logger.debug('Variable Success: %s', 'get_work: successfully retrieved the client id', extra=d)\n if client_id is None:\n logging.warning(\"Exception: %s\", 'get_work: BadParameterException, client id was none', extra=d)\n logger.error('Error - no client ID')\n return 'Bad Parameter', 400\n logger.debug('Assign Variable: 
@app.route('/return_docs', methods=['POST'])\ndef return_docs():\n    \"\"\"\n    The endpoint the client calls to return the document ids received from the regulations docs calls\n    :return: Returns a string saying successful so the client knows the call was successful\n    \"\"\"\n    logger.debug('Successful API Call: %s', 'return_docs: return docs', extra=d)\n    logger.info('Attempting to return docs to server...')\n    try:\n        logger.debug('Assign Variable: %s', 'return_docs: attempting to get json_info from the request', extra=d)\n        json_info = request.form['json']\n        logger.debug('Variable Success: %s', 'return_docs: successfully retrieved json_info', extra=d)\n        logger.debug('Assign Variable: %s', 'return_docs: getting the files from the file request field', extra=d)\n        files = request.files['file'].read()\n        logger.debug('Variable Success: %s', 'return_docs: files successfully retrieved from the return docs post',\n                     extra=d)\n    except:\n        logger.debug('Exception: %s', 'return_docs: BadParameterException for return docs', extra=d)\n        logger.error('Error - bad parameter')\n        return 'Bad Parameter', 400\n    if json_info is None:\n        logger.debug('Exception: %s', 'return_docs: PostException for return docs', extra=d)\n        logger.error('Error - could not post docs')\n        return 'Bad Parameter', 400\n    logger.debug('Calling Function: %s', 'return_docs: return_docs calling process_docs', extra=d)\n    files = io.BytesIO(files)\n    process_docs(redis_server(), json.loads(json_info), files)\n    logger.debug('Function Successful: %s', 'return_docs: process_docs successfully called from return_docs', extra=d)\n    logger.debug('Returning: %s', 'return_docs: returning success from return_docs', extra=d)\n    logger.info('Docs returned to server')\n    return 'Successful!'\n\n\n@app.route('/return_doc', methods=['POST'])\ndef return_doc():\n    \"\"\"\n    The endpoint the client calls to return documents they received from the individual regulations doc calls\n    :return: Returns a string saying successful so the client knows the call was successful\n    \"\"\"\n    logger.debug('Successful API Call: %s', 'return_doc: return_doc call successful', extra=d)\n    logger.info('Attempting to return doc to server...')\n\n    try:\n        logger.debug('Assign Variable: %s', 'return_doc: getting the files from the file request field', extra=d)\n        files = request.files['file'].read()\n        logger.debug('Variable Success: %s', 'return_doc: files successfully retrieved from the return doc post', extra=d)\n        logger.debug('Assign Variable: %s', 'return_doc: get the json_info from the post request', extra=d)\n        json_info = request.form['json']\n        logger.debug('Variable Success: %s', 'return_doc: json retrieved from the doc post call', extra=d)\n    except:\n        logger.debug('Exception: %s', 'return_doc: BadParameterException for return_doc', extra=d)\n        logger.error('Error - bad parameter')\n        return 'Bad Parameter', 400\n    files = io.BytesIO(files)\n    logger.debug('Calling Function: %s', 'return_doc: call process_doc with the json and files posted to return_doc endpoint', extra=d)\n    process_doc(redis_server(), 
json.loads(json_info), files)\n logger.debug('Function Successful: %s', 'return_doc: success from return_doc', extra=d)\n logger.debug('Returning: %s', 'return_doc: returning success from return_doc', extra=d)\n logger.info('Doc returned to server')\n return 'Successful!'\n\n\ndef generate_json(work_list):\n \"\"\"\n Given a list of values, the list will be converted into json format\n :param work_list: The list of values that will be converted into json\n :return: Returns the json formatted list\n \"\"\"\n logger.info('Converting into JSON...')\n logger.debug('Call Successful: %s', 'generate_json: generate_json called successfully', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign job_id from the work_list', extra=d)\n job_id = work_list[0]\n logger.debug('Variable Success: %s', 'generate_json: jod_id assigned', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign type from the work_list', extra=d)\n type = work_list[1]\n logger.debug('Variable Success: %s', 'generate_json: type assigned', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign data from the work_list', extra=d)\n data = work_list[2]\n logger.debug('Variable Success: %s', 'generate_json: data assigned', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign converted_json from the combination of job_id, type, and data', extra=d)\n converted_json = {\n \"job_id\": job_id,\n \"type\": type,\n \"data\": data,\n \"version\": version\n }\n logger.debug('Variable Success: %s', 'generate_json: converted_json created', extra=d)\n logger.debug(\"Returning: %s\", 'generate_json: returning converted_json', extra=d)\n logger.info('JSON conversion successful')\n return json.dumps(converted_json)\n\n\ndef run():\n app.run('0.0.0.0', '8080')\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "src/mirrulations/endpoints.py", "file_name": "endpoints.py", "file_ext": "py", "file_size_in_byte": 7555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "mirrulations.redis_manager.RedisManager", "line_number": 22, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 53, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 89, "usage_type": "call"}, {"api_name": 
"mirrulations.docs_filter.process_docs", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 117, "usage_type": "call"}, {"api_name": "mirrulations.doc_filter.process_doc", "line_number": 120, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 120, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "150199566", "text": "import cv2\n\n\ndef main():\n image1 = cv2.imread(\"./images/X2.png\", 0)\n _, threshould = cv2.threshold(image1, 0, 255, cv2.THRESH_OTSU)\n write_name = './images/X_thr.png'\n cv2.imwrite(write_name, threshould)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "Blavo/image_make.py", "file_name": "image_make.py", "file_ext": "py", "file_size_in_byte": 256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "69459559", "text": "import pygame\nfrom pygame.sprite import Sprite\n\nclass Guai(Sprite):\n def __init__(self,screen,ai_settings):\n super().__init__()\n self.screen=screen\n self.ai_settings=ai_settings\n\n self.image=pygame.image.load('dahong.png')\n #创建矩形\n self.rect=self.image.get_rect()\n \n #需要初始位置\n self.rect.left=float(0.5*self.rect.width)\n self.rect.top=float(self.rect.height)\n\n #需要初始方向,class调用可Settings,这样function不再调用Settings\n self.guai_direction=ai_settings.guai_direction\n\n #检查撞墙\n def check_edge(self):\n screen_rect=self.screen.get_rect()#必须重新创建,然后存储到一个对象中\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= 0:\n return True\n\n #横向移动函数,包含方向信息\n def update(self):\n self.rect.x += self.ai_settings.guai_speed_factor*self.guai_direction\n \n\n #重新绘制怪\n def blitme(self):\n self.screen.blit(self.image,self.rect)", "sub_path": "class_Guai/class_Guai.py", "file_name": "class_Guai.py", "file_ext": "py", "file_size_in_byte": 1107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 4, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "289486105", "text": "# Import socket module\nimport socket\nimport sys\nimport numpy as np\nimport time\nimport threading\n\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\n\nimport myTools.cnn_model as cnn\n\nflags.DEFINE_string('ip', '127.0.0.1', 'default ip')\nflags.DEFINE_integer('port', 12346, 'default port')\n\nnp.set_printoptions(threshold=sys.maxsize)\n\ninfo = []\n\n\ndef parse_message_received(data):\n parse = data.split(\";\")\n return parse\n\n\ndef get_img(data):\n tmp_data = data.split(\" \")\n print(tmp_data[1:-1])\n return\n\n\ndef main(_argv):\n logging.info('load cat modem')\n\n dog_1 = 
time.time()\n dog_model = cnn.get_inception_v2_cat()\n dog_2 = time.time()\n logging.info('cat model load in {:.2f}ms'.format((dog_2 - dog_1)))\n\n logging.info('Initialization connection at {}:{}'.format(FLAGS.ip, FLAGS.port))\n time_c_1 = time.time()\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((FLAGS.ip, FLAGS.port))\n time_c_2 = time.time()\n logging.info('Connected to {}:{} in {:.3f}ms'.format(FLAGS.ip, FLAGS.port, (time_c_2 - time_c_1)))\n\n message = \"0;RASP;127.0.0.1;CAT;\"\n send_1 = time.time()\n s.send(message.encode('utf-8'))\n send_2 = time.time()\n logging.info('Send identification message {} in {}ms'.format(message, (send_2 - send_1)))\n\n while True:\n\n try:\n data = s.recv(3000000)\n message_parsed = parse_message_received(data.decode('utf-8'))\n logging.info(\n 'message {} size : {:.2f}Mb received'.format(message_parsed[0], (sys.getsizeof(data) / 1000000)))\n if len(message_parsed) == 4:\n logging.info('\\t image {}'.format(message_parsed[0]))\n img = eval('np.array(' + message_parsed[2] + ')')\n process = ThreadCAT(message_parsed[0], img, dog_model, s)\n process.start()\n if data.decode('utf-8') == \"close\":\n break\n except (ConnectionResetError, ConnectionRefusedError):\n logging.info(\"Server close the connexion or not online\")\n break\n # close the connection\n s.close()\n\n\nclass ThreadCAT(threading.Thread):\n def __init__(self, id_img, img, model, s):\n threading.Thread.__init__(self)\n self.id_img = id_img\n self.img = img\n self.model = model\n self.s = s\n\n def run(self):\n cnn.thread_for_cnn(self.id_img, self.img, self.model, self.s)\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n", "sub_path": "TCPRasp.py", "file_name": "TCPRasp.py", "file_ext": "py", "file_size_in_byte": 2533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "absl.flags.DEFINE_string", "line_number": 13, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 13, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 14, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.set_printoptions", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 16, "usage_type": "attribute"}, {"api_name": "absl.logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 33, "usage_type": "name"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "myTools.cnn_model.get_inception_v2_cat", "line_number": 36, "usage_type": "call"}, {"api_name": "myTools.cnn_model", "line_number": 36, "usage_type": "name"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 38, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 38, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 40, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.ip", "line_number": 40, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 40, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.port", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 42, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 
42, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 42, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS.ip", "line_number": 43, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 43, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.port", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 45, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.ip", "line_number": 45, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 45, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.port", "line_number": 45, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 51, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 51, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 58, "usage_type": "name"}, {"api_name": "sys.getsizeof", "line_number": 59, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 61, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 68, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 74, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 76, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 76, "usage_type": "attribute"}, {"api_name": "myTools.cnn_model.thread_for_cnn", "line_number": 83, "usage_type": "call"}, {"api_name": "myTools.cnn_model", "line_number": 83, "usage_type": "name"}, {"api_name": "absl.app.run", "line_number": 88, "usage_type": "call"}, {"api_name": "absl.app", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "223910703", "text": "import time\nfrom selenium.common.exceptions import NoSuchElementException\nfrom Automate_Tests_Content.Web.Selenuim.utils.full_regression_utility import FullRegressionUtility\nfrom Automate_Tests_Content.Web.Selenuim.logger import Logger\n\n__author__ = \"Chulud Mallak\"\n__copyright__ = \"Utility\"\n__credits__ = [\"Chulud Mallak\"]\n__version__ = \"1.0.0\"\n__maintainer__ = \"Yaacov Pinhas\"\n__email__ = \"chuludx.mallak@intel.com\"\n__status__ = \"Production\"\n\n\nclass InfoButtonUtility:\n logger = Logger()\n __full_regression_utility = FullRegressionUtility()\n\n def __init__(self):\n pass\n\n def display_Search_Box(self, column_name, num, txt, elementid, browser):\n try:\n text_box = None\n div = browser.find_element_by_id(elementid)\n rows = div.find_elements_by_tag_name(\"tr\")\n td = rows[1].find_elements_by_tag_name(\"td\")\n for i in td:\n if i.text == column_name:\n self.logger.Info(\"Displays rows that contain a name that contains {} characters in {} coulmn \\n\".\n format(txt, column_name))\n time.sleep(1)\n first_table = browser.find_element_by_class_name(\"obj\")\n div = browser.find_elements_by_class_name(\"filter\")\n if column_name != \"Author\":\n text_box = div[num - 1].find_element_by_tag_name(\"input\")\n time.sleep(1)\n text_box.clear()\n text_box.send_keys(txt)\n time.sleep(7)\n else:\n select_box = div[num - 
1].find_element_by_tag_name(\"select\")\n for option in select_box.find_elements_by_tag_name(\"option\"):\n if option.text == txt:\n option.click()\n break\n rows = first_table.find_elements_by_tag_name(\"tr\")\n rows.pop(0)\n time.sleep(1)\n if text_box is not None:\n text_box.clear()\n time.sleep(5)\n flag = 0\n for tr in rows:\n td = tr.find_elements_by_tag_name(\"td\")\n if td[num - 1].text.find(txt) == -1:\n flag = 1\n if flag == 1:\n self.logger.Info(\"sorting by type letters not succeeded...\\n\")\n else:\n print (\"sorting by type letters succeeded...\\n\")\n break\n finally:\n pass\n\n def Cont_Scroll_Off(self, browser):\n try:\n cont_scroll = browser.find_element_by_id(\"InfiniteScrollModeButton\")\n cont_scroll.click()\n time.sleep(2)\n files_list = browser.find_elements_by_class_name(\"InfiniteScrollPage\")\n if len(files_list) == 1:\n self.logger.Info(\"cont_scroll OFF\\n\")\n except Exception as e:\n self.logger.Error(str(e))\n\n def display_table_details(self, elementid, titles, tabname, browser):\n try:\n table = browser.find_element_by_id(elementid)\n rows = table.find_elements_by_tag_name(\"tr\")\n flag = 1\n td = rows[1].find_elements_by_tag_name(\"td\")\n for i in range(4):\n if td[i].text != titles[i]:\n flag = 0\n break\n if flag == 1:\n colmns = rows[2].find_elements_by_tag_name(\"td\")\n compare_putton = colmns[len(colmns)-1]\n if compare_putton is not None:\n self.logger.Info(\"Display Table of {} details succeeded...\\n\".format(tabname))\n except Exception as e:\n self.logger.Error(str(e))\n\n def Close_Tab(self, elementid, tabname, browser):\n try:\n close_button = browser.find_element_by_class_name(\"CloseInfoIcon\")\n time.sleep(4)\n close_button.click()\n time.sleep(6)\n try:\n browser.find_element_by_id(elementid)\n except NoSuchElementException:\n self.logger.Info(\"Close {} Tab succeeded...\\n\".format(tabname))\n except Exception as e:\n self.logger.Error(str(e))\n\n def get_started(self, tabname, browser):\n try:\n self.__full_regression_utility.cancel_edit_mode(browser)\n self.__full_regression_utility.switch_browser_to_frame(\"ContentOuterIFrame\", browser)\n self.Cont_Scroll_Off(browser)\n self.__full_regression_utility.click_on_info_button(browser)\n self.__full_regression_utility.click_option_menu_button(tabname, browser) # open References Tab\n except Exception as e:\n self.logger.Error(str(e))\n", "sub_path": "Automate_Tests_Content/Web/Selenuim/FullRegression/InfoButtonMenu/info_button_utility.py", "file_name": "info_button_utility.py", "file_ext": "py", "file_size_in_byte": 4930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "Automate_Tests_Content.Web.Selenuim.logger.Logger", "line_number": 16, "usage_type": "call"}, {"api_name": "Automate_Tests_Content.Web.Selenuim.utils.full_regression_utility.FullRegressionUtility", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", 
"line_number": 103, "usage_type": "name"}]} +{"seq_id": "612184118", "text": "import torch.nn as nn\nimport torch\nimport numpy as np\nimport math\n\n\n\n\ndef _fast_hist(true, pred, num_classes):\n pred = np.round(pred).astype(int)\n true = np.round(true).astype(int)\n mask = (true >= 0) & (true < num_classes)\n hist = np.bincount(\n num_classes * true[mask] + pred[mask],\n minlength=num_classes ** 2,\n ).reshape(num_classes, num_classes).astype(np.float)\n return hist\n\ndef jaccard_index(hist):\n \"\"\"Computes the Jaccard index, a.k.a the Intersection over Union (IoU).\n Args:\n hist: confusion matrix.\n Returns:\n avg_jacc: the average per-class jaccard index.\n \"\"\"\n A_inter_B = np.diag(hist)\n A = np.sum(hist,axis=1)\n B = np.sum(hist,axis=0)\n jaccard = A_inter_B / (A + B - A_inter_B + 1e-6)\n avg_jacc =np.nanmean(jaccard) #the mean of jaccard without NaNs\n return avg_jacc, jaccard\n\ndef dice_coef_metric(hist):\n \"\"\"Computes the dice coefficient).\n Args:\n hist: confusion matrix.\n Returns:\n avg_dice: the average per-class dice coefficient.\n \"\"\"\n A_inter_B = np.diag(hist)\n A = np.sum(hist,axis=1)\n B = np.sum(hist,axis=0)\n dsc = A_inter_B * 2 / (A + B + 1e-6)\n avg_dsc=np.nanmean(dsc) #the mean of dsc without NaNs\n return avg_dsc\n\ndef dice_coef(inputs, target):\n intersection = 2.0 * (target * inputs).sum()+1e-4\n sum_ = target.sum() + inputs.sum()+1e-4\n\n return intersection / sum_\n\ndef dice_coef_loss(y_pred, y_true):\n smooth=1.0\n assert y_pred.size() == y_true.size()\n intersection = (y_pred * y_true).sum()\n dsc = (2. * intersection + smooth) / (\n y_pred.sum() + y_true.sum() + smooth\n )\n return 1. - dsc\n\n\ndef bce_dice_loss(y_pred, y_true):\n dicescore = dice_coef_loss(y_pred, y_true)\n log_cosh_dice=math.log(math.cosh(dicescore))\n bcescore = nn.BCELoss()\n m = nn.Sigmoid()\n bceloss = bcescore(m(y_pred), y_true)+log_cosh_dice\n return (bceloss)\n", "sub_path": "loss.py", "file_name": "loss.py", "file_ext": "py", "file_size_in_byte": 1949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.round", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 44, "usage_type": "call"}, {"api_name": "math.log", "line_number": 65, "usage_type": "call"}, {"api_name": "math.cosh", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "429018511", "text": "# Import library for keyboard, mouse inputs and display\r\nimport sys\r\n\r\nimport pygame\r\n\r\n# Initializes library pygame and 
pygames font\r\npygame.init()\r\npygame.font.init()\r\n\r\n# Defines colours used\r\nred = (218, 86, 72)\r\ngreen = (66, 151, 71)\r\nblue = (65, 80, 214)\r\n\r\n# Defines the size of the window\r\nscreenWidth = 1200\r\nscreenHeigh = 800\r\n\r\n# Defines how many bricks there will be.\r\ncols = 6\r\nrows = 6\r\n\r\n# Game variables\r\nplaying = False\r\nmainMenu = True\r\ncontrolSelect = False\r\ngameOver = 0\r\nscore = 0\r\nuseMouse = None\r\n\r\n\r\n# Creates the Bricks\r\nclass Bricks:\r\n def __init__(self):\r\n # Sets the width and height of each brick\r\n self.width = screenWidth // cols\r\n self.height = 50\r\n\r\n # Stores each brick\r\n self.bricks = []\r\n\r\n # Stores bricks in the row\r\n self.eachBrick = []\r\n\r\n # Creates the bricks\r\n def createBricks(self):\r\n # For each row\r\n for row in range(rows):\r\n # Stores each row\r\n brickRow = []\r\n # For each collum\r\n for col in range(cols):\r\n # Sets the X and Y value of the top left of each brick\r\n brickX = col * self.width\r\n brickY = row * self.height\r\n # Draws the bricks\r\n Rect = pygame.Rect(brickX, brickY, self.width, self.height)\r\n # Checks if the row is of the first third of the rows\r\n if row < rows // 3:\r\n # Sets the brick heath to 3\r\n health = 3\r\n # Checks if the row is of the first third of the rows\r\n elif row < (rows // 3) * 2:\r\n # Sets the brick heath to 2\r\n health = 2\r\n # Checks if the row is of the first third of the rows\r\n elif row < (rows // 3) * 3:\r\n # Sets the brick heath to 1\r\n health = 1\r\n # Stores the brick and health in a list\r\n eachBrick = [Rect, health]\r\n # Adds the brick and brick health to the row\r\n brickRow.append(eachBrick)\r\n # Adds the row to the bricks list\r\n self.bricks.append(brickRow)\r\n\r\n # Shows the bricks\r\n def showBricks(self):\r\n # For each row in the bricks list\r\n for row in self.bricks:\r\n # For each brick in the row\r\n for brick in row:\r\n # Checks the heath of the bricks\r\n if brick[1] == 3:\r\n # Sets the brick colour\r\n brickColour = red\r\n # Checks the heath of the bricks\r\n elif brick[1] == 2:\r\n # Sets the brick colour\r\n brickColour = green\r\n # Checks the heath of the bricks\r\n elif brick[1] == 1:\r\n # Sets the brick colour\r\n brickColour = blue\r\n # Draws the bricks\r\n pygame.draw.rect(canvas, brickColour, brick[0])\r\n # Draws the bricks border\r\n pygame.draw.rect(canvas, (0, 0, 0), (brick[0]), 1)\r\n\r\n\r\n# Creates Paddle\r\nclass Paddle:\r\n def __init__(self):\r\n # Sets the width and height of the paddle\r\n self.height = 20\r\n self.width = screenWidth // cols\r\n\r\n # Sets the x and y position of the paddle\r\n self.x = (screenWidth // 2) - (self.width // 2)\r\n self.y = screenHeigh - (self.height * 2)\r\n\r\n # Defines the rectangle\r\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\r\n\r\n # Sets the direction and speed of the brick\r\n self.direction = 0\r\n self.speed = 10\r\n\r\n # Saves the old mouse x pos for use later\r\n self.oldMouseX = 0\r\n\r\n # Allows you to move the paddle with your keyboard\r\n def keyboardmove(self):\r\n # Resets the saved direction of the paddle\r\n self.direction = 0\r\n\r\n # Sets pressed key in a variable\r\n key = pygame.key.get_pressed()\r\n\r\n # Checks if you push the left arrow key\r\n if key[pygame.K_LEFT] and self.rect.left > 0:\r\n # Moves the paddle\r\n self.rect.x -= self.speed\r\n # Sets the direction of the paddle\r\n self.direction = -1\r\n\r\n # Checks if you push the left arrow key\r\n if key[pygame.K_RIGHT] and 
self.rect.right < screenWidth:\r\n            # Moves the paddle\r\n            self.rect.x += self.speed\r\n\r\n            # Sets the direction of the paddle\r\n            self.direction = 1\r\n\r\n    # Allows you to move the paddle with your mouse\r\n    def mouseMove(self):\r\n        # Resets the saved direction of the mouse\r\n        self.direction = 0\r\n        # Gets the mouse pos tuple and converts it to the variables mouseX and mouseY\r\n        mouseX, mouseY = pygame.mouse.get_pos()\r\n        # Check if the mouseX position is greater than the old mouseX position\r\n        if mouseX > self.oldMouseX:\r\n            # Sets the direction to right\r\n            self.direction = 1\r\n        # Check if the mouseX position is less than the old mouseX position\r\n        if mouseX < self.oldMouseX:\r\n            # Sets the direction to left\r\n            self.direction = -1\r\n        # Move the paddle to the mouseX position\r\n        self.rect.x = mouseX - self.rect.width / 2\r\n        # Save the old mouseX\r\n        self.oldMouseX = mouseX\r\n\r\n    # Draws the paddle\r\n    def show(self):\r\n        pygame.draw.rect(canvas, red, self.rect)\r\n\r\n\r\n# Creates the ball\r\nclass Ball:\r\n    # When a Ball is created it requires an x and y value\r\n    def __init__(self, x, y):\r\n        # Sets the balls radius\r\n        self.radius = 10\r\n        # Sets the X and Y coordinates of the ball\r\n        self.x = x - self.radius\r\n        self.y = y\r\n        # Defines the balls hit-box\r\n        self.rect = pygame.Rect(self.x, self.y, self.radius * 2, self.radius * 2)\r\n        # Sets the speed and the max speed of the ball.\r\n        self.speedX = 4\r\n        self.speedY = -4\r\n        self.maxSpeed = 5\r\n        # Sets how a game over happened\r\n        self.gameOver = 0\r\n\r\n    # Moves the ball\r\n    def move(self):\r\n        # Allows the score to be set\r\n        global score\r\n        # Defines the hit threshold\r\n        threshold = 5\r\n        # Resets the brickDestroyed variable\r\n        bricksDestroyed = True\r\n        # Creates the row counter\r\n        rowC = 0\r\n        # For each row in bricks list\r\n        for row in bricks.bricks:\r\n            # Creates the item counter\r\n            itemC = 0\r\n            # For each item in the row\r\n            for item in row:\r\n                # Checks if the ball collides with a brick\r\n                if self.rect.colliderect(item[0]):\r\n                    # Checks if the ball collides with the top of the brick\r\n                    if abs(self.rect.bottom - item[0].top) < threshold and self.speedY > 0:\r\n                        # Moves the ball in the opposite y direction\r\n                        self.speedY *= -1\r\n                    # Checks if the ball collides with the bottom of the brick\r\n                    if abs(self.rect.top - item[0].bottom) < threshold and self.speedY < 0:\r\n                        # Moves the ball in the opposite y direction\r\n                        self.speedY *= -1\r\n                    # Checks if the ball collides with the left of the brick\r\n                    if abs(self.rect.right - item[0].left) < threshold and self.speedX > 0:\r\n                        # Moves the ball in the opposite x direction\r\n                        self.speedX *= -1\r\n                    # Checks if the ball collides with the right of the brick\r\n                    if abs(self.rect.left - item[0].right) < threshold and self.speedX < 0:\r\n                        # Moves the ball in the opposite x direction\r\n                        self.speedX *= -1\r\n                    # adds 10 to the score\r\n                    score += 10\r\n                    # Checks if the health of the brick is greater than 1\r\n                    if bricks.bricks[rowC][itemC][1] > 1:\r\n                        # Lowers the health of the bricks\r\n                        bricks.bricks[rowC][itemC][1] -= 1\r\n                    # If the health is not greater than 1\r\n                    else:\r\n                        # Make the brick invisible and moves it off screen.\r\n                        bricks.bricks[rowC][itemC][0] = (-100, -100, 0, 0)\r\n\r\n                # Checks if there are bricks left\r\n                if bricks.bricks[rowC][itemC][0] != (-100, -100, 0, 0):\r\n                    # Sets bricks destroyed to be false\r\n                    bricksDestroyed = False\r\n                # Adds 1 to the item Counter\r\n                itemC += 1\r\n            # Adds 1 to the row counter\r\n            rowC += 1\r\n\r\n        # Checks if all bricks 
are destroyed\r\n if bricksDestroyed:\r\n # Sets gameOver\r\n self.gameOver = 1\r\n # Checks if the ball collide with the left or right side of the screen\r\n if self.rect.left < 0 or self.rect.right > screenWidth:\r\n # Reverse the balls x direction\r\n self.speedX *= -1\r\n # Checks if the ball collide with the top of the screen\r\n if self.rect.top < 0:\r\n # Reverse the balls Y direction\r\n self.speedY *= -1\r\n # Checks if the ball collide with the bottom of the screen\r\n if self.rect.bottom > screenHeigh:\r\n # Sets gameOver to state that you lost\r\n self.gameOver = -1\r\n\r\n # Checks if the ball collides with the paddle\r\n if self.rect.colliderect(paddle):\r\n # Checks if the paddle collides with the top of the paddle\r\n if abs(self.rect.bottom - paddle.rect.top) < threshold and self.speedY > 0:\r\n # Reverse the y direction\r\n self.speedY *= -1\r\n # Increases the X speed of the ball\r\n self.speedX += paddle.direction\r\n # Checks if the X speed of the ball is greater then the max speed\r\n if self.speedX > self.maxSpeed:\r\n # Sets the X speed of the ball to the max speed\r\n self.speedX = self.maxSpeed\r\n # Checks if the x speed is less then 0 and if the x speed is less than negative the max speed\r\n elif self.speedX < 0 and self.speedX < -self.maxSpeed:\r\n # Sets the X speed to the negative of the max speed\r\n self.speedX = -self.maxSpeed\r\n # Checks if it does not collide with the top of the paddle\r\n else:\r\n # Reverse the x speed\r\n self.speedX *= -1\r\n\r\n # Moves the ball the amount of the current ball speed\r\n self.rect.x += self.speedX\r\n self.rect.y += self.speedY\r\n\r\n # Returns the game over state\r\n return self.gameOver\r\n\r\n # Lets you draw the ball\r\n def show(self):\r\n # Draws the ball\r\n pygame.draw.circle(canvas, red, (self.rect.x + self.radius, self.rect.y + self.radius), self.radius)\r\n\r\n\r\n# Creates the menu\r\nclass Menus:\r\n def __init__(self):\r\n # Sets the title font\r\n self.titleF = pygame.font.Font(\"ARIAL.TTF\", 100)\r\n # Sets the rest if the buttons font\r\n self.font = pygame.font.Font(\"ARIAL.TTF\", 45)\r\n # Creates a rectangle the size of the each label to check if it gets clicked in the game loop\r\n self.play = pygame.Rect(screenWidth / 2 - 88 / 2, screenHeigh - 100, 88, 51)\r\n self.mouse = pygame.Rect(screenWidth / 2 - 135 / 2, screenHeigh - 250, 135, 51)\r\n self.keyboard = pygame.Rect(screenWidth / 2 - 198 / 2, screenHeigh - 150, 198, 51)\r\n self.keepPlaying = pygame.Rect(screenWidth / 2 - 181 / 2, screenHeigh - 250, 181, 51)\r\n self.toMenu = pygame.Rect(screenWidth / 2 - 277 / 2, screenHeigh - 150, 277, 51)\r\n\r\n # Shows the Main Menu\r\n def Main(self):\r\n # Shows the title\r\n title = self.titleF.render(\"Breakout\", False, red)\r\n canvas.blit(title, (screenWidth / 2 - title.get_rect().width / 2, 25))\r\n # Shows the play button\r\n play = self.font.render(\"Play\", False, green)\r\n canvas.blit(play, (self.play.x, self.play.y))\r\n\r\n # Shows the controls menu\r\n def Controls(self):\r\n # Shows the title\r\n title = self.titleF.render(\"Controller select\", False, blue)\r\n canvas.blit(title, (screenWidth / 2 - title.get_rect().width / 2, 25))\r\n # Shows the mouse button\r\n mouse = self.font.render(\"Mouse\", False, green)\r\n canvas.blit(mouse, (self.mouse.x, self.mouse.y))\r\n # Shows the keyboard button\r\n keyboard = self.font.render(\"KeyBoard\", False, green)\r\n canvas.blit(keyboard, (self.keyboard.x, self.keyboard.y))\r\n\r\n # Shows the win menu\r\n def Win(self):\r\n # Shows the 
title\r\n title = self.titleF.render(\"You Win!\", False, green)\r\n canvas.blit(title, (screenWidth / 2 - title.get_rect().width / 2, 25))\r\n # Shows the continue button\r\n keepPlaying = self.font.render(\"Continue\", False, blue)\r\n canvas.blit(keepPlaying, (self.keepPlaying.x, self.keepPlaying.y))\r\n # Shows the back to menu button\r\n toMenu = self.font.render(\"Back to menu\", False, blue)\r\n canvas.blit(toMenu, (self.toMenu.x, self.toMenu.y))\r\n\r\n\r\n# Initializes the bricks, the paddle, the ball, and the menus\r\nbricks = Bricks()\r\nbricks.createBricks()\r\npaddle = Paddle()\r\nball = Ball(paddle.x + (paddle.width // 2), paddle.y - paddle.height)\r\nmenu = Menus()\r\n\r\n# Sets the font and the font size\r\nfont = pygame.font.Font(\"ErbosDraco1StOpenNbpRegular-l5wX.ttf\", 30)\r\n\r\n# Creates the window and sets a display name.\r\ncanvas = pygame.display.set_mode((screenWidth, screenHeigh))\r\npygame.display.set_caption(\"Creative Task 2\")\r\n\r\n# Sets the running state to be true\r\nrun = True\r\n# While run is true or the main game loop\r\nwhile run:\r\n # Sets the background colour to be black\r\n canvas.fill((35, 37, 39))\r\n # print(pygame.mouse.get_pos())\r\n # Checks if you are in the main menu\r\n if mainMenu:\r\n menu.Main()\r\n if controlSelect:\r\n menu.Controls()\r\n # Checks if you are playing\r\n if playing:\r\n # Shows the bricks\r\n bricks.showBricks()\r\n # Shows the paddle\r\n paddle.show()\r\n # shows the ball\r\n ball.show()\r\n # Shows the score\r\n label = font.render(str(score), False, (255, 255, 255))\r\n canvas.blit(label, (12, screenHeigh - 42))\r\n # Allows the ball to move and gets the gameOver variable from ball.move\r\n gameOver = ball.move()\r\n # Checks if you are using the mouse\r\n if useMouse:\r\n # Allows the paddle to be moved by your mouse\r\n paddle.mouseMove()\r\n # If you aren't using your mouse\r\n else:\r\n # Allows the paddle to be moved by your keyboard\r\n paddle.keyboardmove()\r\n\r\n # Checks if you won\r\n if gameOver == 1:\r\n # Ends the game\r\n playing = False\r\n # Open the win menu\r\n menu.Win()\r\n # Checks if you lost\r\n if gameOver == -1:\r\n # Ends the game\r\n playing = False\r\n # Gets the mouse position\r\n pos = pygame.mouse.get_pos()\r\n # For each event in pygame\r\n for e in pygame.event.get():\r\n # Checks if the event is a quit\r\n if e.type == pygame.QUIT:\r\n # Stops the game loop\r\n run = False\r\n # Checks if you click your mouse\r\n if e.type == pygame.MOUSEBUTTONDOWN:\r\n # Checks if you click the play button and you are in the main menu\r\n if menu.play.collidepoint(pos) and mainMenu:\r\n # Sets that you are in the control select menu\r\n controlSelect = True\r\n # Sets that you are not in the main menu\r\n mainMenu = False\r\n\r\n # Checks if you click the mouse button and you are in the controller select menu\r\n if menu.mouse.collidepoint(pos) and controlSelect:\r\n # Takes you out of the controller select menu\r\n controlSelect = False\r\n # Takes you in to the game\r\n playing = True\r\n # Sets that you are using mouse controls\r\n useMouse = True\r\n # Checks if you click the keyboard button and you are in the controller select menu\r\n if menu.keyboard.collidepoint(pos) and controlSelect:\r\n # Takes you out of the controller select menu\r\n controlSelect = False\r\n # Takes you in to the game\r\n playing = True\r\n # Sets that you are not using mouse controls\r\n useMouse = False\r\n # Checks if you click Continue or Reset\r\n if menu.keepPlaying.collidepoint(pos):\r\n # Checks if you 
won\r\n if gameOver == 1:\r\n # Moves the ball the its starting position\r\n ball.rect.x = ball.x\r\n ball.rect.y = ball.y\r\n # Starts playing the game\r\n playing = True\r\n # Sets game to not be over\r\n gameOver = 0\r\n ball.gameOver = 0\r\n # Deletes old bricks\r\n bricks.bricks.clear()\r\n # Creates new bricks\r\n bricks.createBricks()\r\n # Checks if you click back to menu and if gameOver is not 1\r\n if menu.toMenu.collidepoint(pos) and gameOver != 0:\r\n # Moves the ball the its starting position\r\n ball.rect.x = ball.x\r\n ball.rect.y = ball.y\r\n # Bring you back to the menu\r\n mainMenu = True\r\n # Sets game to not be over\r\n gameOver = 0\r\n ball.gameOver = 0\r\n # Resets the score\r\n score = 0\r\n # Deletes old bricks\r\n bricks.bricks.clear()\r\n # Creates new bricks\r\n bricks.createBricks()\r\n\r\n # Updates the canvas\r\n pygame.display.update()\r\n\r\n # Limits the frames per second to 60\r\n pygame.time.Clock().tick(60)\r\n\r\n# Exits the game\r\npygame.QUIT\r\n# End of program.\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 18335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 289, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 296, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 296, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 300, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 301, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 302, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 303, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 304, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 348, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 348, "usage_type": "attribute"}, 
{"api_name": "pygame.display.set_mode", "line_number": 351, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 351, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 352, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 352, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 399, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 399, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 401, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 401, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 403, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 407, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 465, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 465, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 468, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 468, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 471, "usage_type": "attribute"}]} +{"seq_id": "237632004", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport os\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\n#from sklearn.feature_extraction.text import TfidfTransformer\n\nimport nltk\nfrom nltk.stem.porter import PorterStemmer\n\n\n# In[2]:\n\n\ncount = CountVectorizer() #from sklearn.feature_extraction.text import CountVectorizer\ndocs = np.array([\n 'The sun is shining',\n 'The weather is sweet',\n 'The sun is shining, the weather is sweet, and one and one is two'])\nbag = count.fit_transform(docs)\n\n\n# In[3]:\n\n\nprint(count.vocabulary_) # vocabulary_ attribute of CountVectorizer() shows a mapping of terms to feature indices.\n\n\n# In[4]:\n\n\nprint(bag.toarray())\n\n\n# In[5]:\n\n\ncount_2 = CountVectorizer(ngram_range=(1,2))\nbag_2 = count_2.fit_transform(docs)\nprint(count_2.vocabulary_)\nprint(bag_2.toarray())\n\n\n# In[6]:\n\n\nnp.set_printoptions(precision=2) # These options determine the way floating point numbers are displayed.\n\n\n# In[7]:\n\n\ntfidf = TfidfTransformer(use_idf=True, \n norm='l2', \n smooth_idf=True)\nprint(tfidf.fit_transform(count.fit_transform(docs))\n .toarray())\n\n\n# In[8]:\n\n\ntf_is = 3 # suppose term \"is\" has a frequency of 3\nn_docs = 3\nidf_is = np.log((n_docs+1) / (3+1))\ntfidf_is = tf_is * (idf_is + 1)\nprint('tf-idf of term \"is\" = %.2f' % tfidf_is)\n\n\n# In[9]:\n\n\ntfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True)\nraw_tfidf = tfidf.fit_transform(count.fit_transform(docs)).toarray()[-1]\nraw_tfidf \n\n\n# In[10]:\n\n\nl2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2))\nl2_tfidf\n\n\n# In[11]:\n\n\ncorpus = [\n 'This is the first document.',\n 'This document is the second document.',\n 'And this is the third one.',\n 'Is this the first document?',\n ]\n\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(corpus)\nprint(vectorizer.get_feature_names())\n\nprint(X.shape)\n\n\n# In[12]:\n\n\nvectorizer_123 = TfidfVectorizer(ngram_range=(1,3))\nX_123 = vectorizer_123.fit_transform(corpus)\nprint(vectorizer_123.get_feature_names())\n\nprint(X_123.shape)\n\n\n# In[13]:\n\n\nvectorizer_mm = TfidfVectorizer(max_df=1.0,min_df=0.5)\nX_mm = 
vectorizer_mm.fit_transform(corpus)\nprint(vectorizer_mm.get_feature_names())\n\nprint(X_mm.shape)\n\n\n# In[14]:\n\n\ndf = pd.read_csv('movie_data_cat.csv', encoding='utf-8')\ndf.head(10)\n\n\n# In[15]:\n\n\ndf.shape\ndf.columns\n\n\n# In[16]:\n\n\nclass_mapping = {label:idx for idx,label in enumerate(np.unique(df['sentiment']))}\n\nprint(class_mapping)\n\n#use the mapping dictionary to transform the class labels into integers\n\ndf['sentiment'] = df['sentiment'].map(class_mapping)\ndf.head(10)\n\n\n# In[17]:\n\n\ndf.loc[5635, 'review']#[-50:]\n\n\n# In[18]:\n\n\n#import regular expressions to clean up the text\nimport re\ndef preprocessor(text):\n text = re.sub('<[^>]*>', '', text) # remove all html markup\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text) # findall the emoticons\n \n # remove the non-word chars '[\\W]+'\n # append the emoticons to end \n #convert all to lowercase\n # remove nose char for consistency\n text = (re.sub('[\\W]+', ' ', text.lower()) +\n ' '.join(emoticons).replace('-', '')) \n return text\n\n\n# In[19]:\n\n\npreprocessor(df.loc[3635, 'review'])#[-50:]\n\n\n# ## Apply the clean data preprocessor to the text\n\n# In[20]:\n\n\npreprocessor(\"
This :) is :( a test :-)!\")\n\n\n# In[21]:\n\n\n# apply the preprocessor to the entire dataframe (i.e. column review)\ndf['review'] = df['review'].apply(preprocessor)\n\n\n# ## Tokenise - break text into tokens\n\n# In[22]:\n\n\ndef tokenizer(text):\n return text.split()\n\n\n# In[23]:\n\n\nprint(tokenizer(\"Tokenise this sentence into its individual words\"))\n\n\n# In[24]:\n\n\nfrom nltk.corpus import stopwords \n\nnltk.download('stopwords')\n\n\n# create a method to accept a piece of tokenised text and return text back without the stopped words\n\n# In[25]:\n\n\nstop = set(stopwords.words('english'))\ndef stop_removal(text):\n return [w for w in text if not w in stop]\n\n\n# In[26]:\n\n\ntext = \"This is a sample sentence, demonstrating the removal of stop words.\"\nstopped_text = stop_removal(text.split())\nprint(stopped_text) \n\n\n# ## Stemming - Processing tokens into their root form\n\n# In[27]:\n\n\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\n\n#See which languages are supported.\nprint(\" \".join(SnowballStemmer.languages))\n\n\n# In[28]:\n\n\n#get the english stemmer\nstemmer = SnowballStemmer(\"english\")\n\n#stem a word\nprint(stemmer.stem(\"running\"))\n\n\n# In[29]:\n\n\n#Decide not to stem stopwords with ignore_stopwords\nstemmer2 = SnowballStemmer(\"english\", ignore_stopwords=True)\n\n#compare the two versions of the stemmer\nprint(stemmer.stem(\"having\"))\n\nprint(stemmer2.stem(\"having\"))\n\n\n# In[30]:\n\n\n#The 'english' stemmer is better than the original 'porter' stemmer.\nprint(SnowballStemmer(\"english\").stem(\"generously\"))\n\nprint(SnowballStemmer(\"porter\").stem(\"generously\"))\n\n\n# # Tokenise + Stemming \n\n# In[31]:\n\n\ndef tokenizer_stemmer(text):\n return [stemmer.stem(word) for word in tokenizer(text)]#text.split()]\n\n\n# In[32]:\n\n\ntokenizer('runners like running and thus they run')\n\n\n# In[33]:\n\n\ntokenizer_stemmer('runners like running and thus they run')\n\n\n# You can clearly see from the code above the effect of the stemmer on the tokens\n\n# In[34]:\n\n\nfrom nltk.corpus import stopwords\n\nstop = stopwords.words('english')\n[w for w in tokenizer_stemmer('A runner likes running and runs a lot')[-8:]\nif w.lower() not in stop]\n\n\n# # Training a model for sentiment classification\n\n# In[35]:\n\n\nX_train = df.loc[:25000, 'review'].values\ny_train = df.loc[:25000, 'sentiment'].values\nX_test = df.loc[25000:, 'review'].values\ny_test = df.loc[25000:, 'sentiment'].values\n\n### smaller sample\nX_train = df.loc[:2500, 'review'].values\ny_train = df.loc[:2500, 'sentiment'].values\n\n\n# In[36]:\n\n\nparam_grid = [{'vect__ngram_range': [(1, 1)], #can also extract 2-grams of words in addition to the 1-grams (individual words)\n 'vect__stop_words': [stop, None], # use the stop dictionary of stopwords or not\n 'vect__tokenizer': [tokenizer_stemmer]}, # use a tokeniser and the stemmer \n ]\n\n\n# In[38]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = [{'vect__ngram_range': [(1, 5)], #can also extract 2-grams of words in addition to the 1-grams (individual words)\n 'vect__stop_words': [stop, None], # use the stop dictionary of stopwords or not\n 'vect__tokenizer': [tokenizer]}, # use a tokeniser and the stemmer \n ]\n\ntfidf = 
TfidfVectorizer(strip_accents=None,\n lowercase=False,\n preprocessor=None)\n\n\nmnb_tfidf = Pipeline([('vect', tfidf),\n ('clf', KNeighborsClassifier(n_neighbors=5))])\n\n\n \ngs_mnb_tfidf = GridSearchCV(mnb_tfidf, param_grid,\n scoring='accuracy',\n cv=5,\n verbose=1,\n n_jobs=1) \n\ngs_mnb_tfidf.fit(X_train, y_train)\nprint('Best parameter set: %s ' % gs_mnb_tfidf.best_params_)\nprint('CV Accuracy: %.3f' % gs_mnb_tfidf.best_score_)\nclf = gs_mnb_tfidf.best_estimator_\nprint('Test Accuracy: %.3f' % clf.score(X_test, y_test))\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "KNN_IMDB.py", "file_name": "KNN_IMDB.py", "file_ext": "py", "file_size_in_byte": 7556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 111, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 145, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 167, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 168, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 174, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 220, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 228, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 228, "usage_type": "name"}, {"api_name": "nltk.stem.snowball.SnowballStemmer.languages", "line_number": 250, "usage_type": "attribute"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 250, "usage_type": "name"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 257, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 267, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 279, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 281, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 312, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 312, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 356, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 361, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 362, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", 
"line_number": 366, "usage_type": "call"}]} +{"seq_id": "452999208", "text": "# Main file for Mynes Game project\n# Controls the game state based on player inputs and updates MynesBoard\n\nfrom MynesBoard import *\n# from MyneGUI import *\nimport pygame\n\nWHITE = (255, 0, 0)\nBLACK = (255, 255, 255)\nICON_SIZE = 24\n\n\nclass Mynes:\n \"\"\"\n This class is the main running Mynes game.\n\n === Attributes ===\n screen: uses the screen from MynesGUI\n board: uses the board from MyneBoard\n flag_count: Keeps track of how many flags the player has available to place\n\n \"\"\"\n\n # === Private Attributes ===\n # _running: pygame attribute that runs or stops the game\n # _flags_placed: Keeps track of how many flag objects are on the board\n\n game_board: MynesBoard\n # GUI: MynesGUI\n flag_count: int\n _running: bool\n\n # ---------Mynes methods--------- #\n def __init__(self):\n \"\"\"\n Create a Mynes game that has a list of players (mines, numbers, empty spaces, etc)\n \"\"\"\n self._running = False\n self._lost = False\n self.game_board = MynesBoard()\n # self.GUI = MynesGUI()\n self.screen = None\n self.flag_count = self.game_board.mine_count\n # Windows size in pixels\n self.width, self.height = self.game_board.width * ICON_SIZE, self.game_board.height * ICON_SIZE\n\n def get_number(self, x, y) -> int:\n \"\"\"\n :param x: x-coordinate on board\n :param y: y-coordinate on board\n :return: Number at (x,y) on the board.\n \"\"\"\n return self.board[x][y].number\n\n def get_flag(self, x, y) -> bool:\n \"\"\"\n :param x: x-coordinate on board\n :param y: y-coordinate on board\n :return: If a flag is placed at (x,y) on the board.\n \"\"\"\n return self.board[x][y].flagged\n\n def mynes_won(self) -> bool:\n \"\"\"\n :return: If player has won the game by flagging all mines.\n \"\"\"\n if self.flag_count > 0:\n return False\n else:\n x = 0\n y = 0\n for x in range(len(self.width)):\n for y in range(len(self.height)):\n # Spot has mine but no flag\n if (self.game_board.board[x][y].value == -1) and (self.game_board.board[x][y].flag == False):\n return False\n\n return True\n\n def mynes_lost(self) -> None:\n \"\"\"\n Mark the game as 'lost' if the player clicks a mine.\n \"\"\"\n # Generate a fail message and text box to be printed in screen center\n font = pygame.font.Font('freesansbold.ttf', 16)\n fail_text = font.render(\"FAIL, CLICK TO EXIT\", True, WHITE, BLACK)\n fail_box = fail_text.get_rect()\n fail_box.center = (self.width//2, self.height//2)\n\n self.screen.blit(fail_text, fail_box)\n pygame.display.flip()\n\n # End game\n self._lost = True\n\n # ---------Pygame Methods---------- #\n def on_init(self) -> None:\n \"\"\"\n Initialize the game's screen, and begin running the game.\n \"\"\"\n\n pygame.init()\n self.screen = pygame.display.set_mode \\\n ((self.width, self.height), pygame.HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n\n def on_event(self, event: pygame.event) -> None:\n \"\"\"\n React to the given as appropriate. 
Either the player makes a move or quits the game.\n \"\"\"\n if event.type == pygame.QUIT:\n self._running = False\n # player clicks when game is lost\n elif event.type == pygame.MOUSEBUTTONUP and self._lost:\n self._running = False\n # player clicks when game is running\n elif event.type == pygame.MOUSEBUTTONUP:\n (x, y) = pygame.mouse.get_pos()\n # Loop through MyneSquare objects\n for board_y in range(self.game_board.height):\n for board_x in range(self.game_board.width):\n square = self.game_board.board[board_x][board_y]\n # Square that mouse is over\n if square.hitbox.collidepoint(x, y):\n # 1 for left click, 3 for right click\n if event.button == 1:\n if square.value == -1:\n self.mynes_lost()\n # else:\n # self.board.clear_spaces\n # Right click for Flagging\n elif event.button == 3:\n # Remove Flag\n if square.flag:\n square.flag = False\n self.flag_count += 1\n square.icon = pygame.image.load(\"temp_empty.png\")\n # Don't Place Flag\n elif (not square.flag) and self.flag_count == 0:\n pass\n # Place Flag\n else:\n square.flag = True\n self.flag_count -= 1\n square.icon = pygame.image.load(\"temp_flag.png\")\n\n def quit(self) -> None:\n \"\"\"\n Clean up and close the game.\n \"\"\"\n\n pygame.quit()\n\n def render(self) -> None:\n \"\"\"\n Call MynesGUI to render the pygame screen.\n \"\"\"\n # Stop accepting player inputs when game is lost\n if not self._lost:\n font = pygame.font.Font('freesansbold.ttf', 12)\n for x in range(self.game_board.width):\n for y in range(self.game_board.height):\n # number = font.render(str(self.game_board.board[x][y].value), True, WHITE, BLACK)\n box = pygame.Rect(x * ICON_SIZE, y * ICON_SIZE, ICON_SIZE, ICON_SIZE)\n # box = self.game_board.board[x][y].hitbox\n self.screen.blit(self.game_board.board[x][y].icon, box)\n pygame.display.update()\n\n def execute(self) -> None:\n \"\"\"\n Run the game until the game ends.\n \"\"\"\n print(\"running\")\n self.on_init()\n print(\"running\")\n self.screen.fill(WHITE)\n while self._running:\n\n for event in pygame.event.get():\n self.on_event(event)\n self.render()\n\n self.quit()\n", "sub_path": "Mynes.py", "file_name": "Mynes.py", "file_ext": "py", "file_size_in_byte": 6359, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.font.Font", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.DOUBLEBUF", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.event", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 118, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 137, "usage_type": 
"call"}, {"api_name": "pygame.image", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 179, "usage_type": "attribute"}]} +{"seq_id": "31503839", "text": "# coding: utf-8\n\n\"\"\"\n MessageMedia REST API\n\n Australia's Leading Messaging Solutions for Business and Enterprise.\n\n OpenAPI spec version: 1.0.0\n Contact: support@messagemedia.com\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass DeliveryOptionsBodyInner(object):\n \"\"\"\n Do not edit the class manually.\n \"\"\"\n def __init__(self, delivery_type=None, delivery_addresses=None, delivery_format=None):\n \"\"\"\n DeliveryOptionsBodyInner - a model\n\n :param dict types: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.types = {\n 'delivery_type': 'str',\n 'delivery_addresses': 'list[str]',\n 'delivery_format': 'str'\n }\n\n self.attribute_map = {\n 'delivery_type': 'delivery_type',\n 'delivery_addresses': 'delivery_addresses',\n 'delivery_format': 'delivery_format'\n }\n\n self._delivery_type = delivery_type\n self._delivery_addresses = delivery_addresses\n self._delivery_format = delivery_format\n\n @property\n def delivery_type(self):\n \"\"\"\n Gets the delivery_type of this DeliveryOptionsBodyInner.\n How to deliver the report.\n\n :return: The delivery_type of this DeliveryOptionsBodyInner.\n :rtype: str\n \"\"\"\n return self._delivery_type\n\n @delivery_type.setter\n def delivery_type(self, delivery_type):\n \"\"\"\n Sets the delivery_type of this DeliveryOptionsBodyInner.\n How to deliver the report.\n\n :param delivery_type: The delivery_type of this DeliveryOptionsBodyInner.\n :type: str\n \"\"\"\n allowed_values = [\"EMAIL\"]\n if delivery_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `delivery_type` ({0}), must be one of {1}\"\n .format(delivery_type, allowed_values)\n )\n\n self._delivery_type = delivery_type\n\n @property\n def delivery_addresses(self):\n \"\"\"\n Gets the delivery_addresses of this DeliveryOptionsBodyInner.\n A list of email addresses to use as the recipient of the email. 
Only works for EMAIL delivery type\n\n :return: The delivery_addresses of this DeliveryOptionsBodyInner.\n :rtype: list[str]\n \"\"\"\n return self._delivery_addresses\n\n @delivery_addresses.setter\n def delivery_addresses(self, delivery_addresses):\n \"\"\"\n Sets the delivery_addresses of this DeliveryOptionsBodyInner.\n A list of email addresses to use as the recipient of the email. Only works for EMAIL delivery type\n\n :param delivery_addresses: The delivery_addresses of this DeliveryOptionsBodyInner.\n :type: list[str]\n \"\"\"\n\n self._delivery_addresses = delivery_addresses\n\n @property\n def delivery_format(self):\n \"\"\"\n Gets the delivery_format of this DeliveryOptionsBodyInner.\n Format of the report.\n\n :return: The delivery_format of this DeliveryOptionsBodyInner.\n :rtype: str\n \"\"\"\n return self._delivery_format\n\n @delivery_format.setter\n def delivery_format(self, delivery_format):\n \"\"\"\n Sets the delivery_format of this DeliveryOptionsBodyInner.\n Format of the report.\n\n :param delivery_format: The delivery_format of this DeliveryOptionsBodyInner.\n :type: str\n \"\"\"\n allowed_values = [\"CSV\"]\n if delivery_format not in allowed_values:\n raise ValueError(\n \"Invalid value for `delivery_format` ({0}), must be one of {1}\"\n .format(delivery_format, allowed_values)\n )\n\n self._delivery_format = delivery_format\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n", "sub_path": "messagemedia_rest_api/models/delivery_options_body_inner.py", "file_name": "delivery_options_body_inner.py", "file_ext": "py", "file_size_in_byte": 5851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "six.iteritems", "line_number": 145, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 169, "usage_type": "call"}]}
+{"seq_id": "246664637", "text": "#!/usr/bin/python3\n\"\"\"\nModule related to the City class\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\n\n\n@app_views.route(\"/states/<state_id>/cities\", methods=['GET'],\n strict_slashes=False)\ndef get_them_all_city(state_id):\n \"\"\"Retrieve all cities from a given state\"\"\"\n city_state = storage.get(State, state_id)\n if city_state is None:\n abort(404)\n ret_list = []\n for city in city_state.cities:\n ret_list.append(city.to_dict())\n return jsonify(ret_list)\n\n\n@app_views.route(\"/cities/<city_id>\", methods=['GET'],\n strict_slashes=False)\ndef get_city(city_id):\n \"\"\"Retrieve a city object by its id\"\"\"\n obj_city = storage.get(City, city_id)\n if obj_city is None:\n abort(404)\n return (jsonify(obj_city.to_dict()))\n\n\n@app_views.route(\"/cities/<city_id>\", methods=['DELETE'],\n strict_slashes=False)\ndef delete_city(city_id):\n \"\"\"Delete an instance of a city\"\"\"\n del_obj = storage.get(City, city_id)\n if del_obj is not None:\n storage.delete(del_obj)\n storage.save()\n return jsonify({})\n else:\n abort(404)\n\n\n@app_views.route(\"/states/<state_id>/cities\", methods=['POST'],\n strict_slashes=False)\ndef post_city(state_id):\n \"\"\"Add an instance of a city\"\"\"\n if storage.get(State, state_id) is None:\n abort(404)\n if request.is_json:\n data = request.get_json()\n if \"name\" not in data:\n abort(400, \"Missing name\")\n new_city = City()\n setattr(new_city, \"state_id\", state_id)\n for k, v in data.items():\n setattr(new_city, k, v)\n new_city.save()\n return jsonify(new_city.to_dict()), 201\n else:\n abort(400, \"Not a JSON\")\n\n\n@app_views.route(\"/cities/<city_id>\", methods=['PUT'],\n strict_slashes=False)\ndef put_city(city_id):\n \"\"\"Update an instance of a city\"\"\"\n obj = storage.get(City, city_id)\n if obj is None:\n abort(404)\n update = request.get_json()\n if update is not None:\n for k, v in update.items():\n if k not in [\"id\", \"state_id\", \"created_at\", \"updated_at\"]:\n setattr(obj, k, v)\n storage.save()\n return jsonify(obj.to_dict())\n else:\n abort(400, \"Not a JSON\")\n", "sub_path": "api/v1/views/cities.py", "file_name": "cities.py", "file_ext": "py", "file_size_in_byte": 2412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "models.storage.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 23, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 13, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 13, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 30, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 26, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 26, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 40, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 40, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 40, "usage_type": "name"}, {"api_name": "models.storage.delete", "line_number": 42, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 42, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 43, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 46, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 36,
"usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 36, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 58, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 66, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 49, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 49, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 73, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 73, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 81, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 84, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 69, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "177974601", "text": "from pycram.process_module import ProcessModule\nfrom pycram.bullet_world import BulletWorld\nfrom pycram.helper import transform\nimport pycram.bullet_world_reasoning as btr\nimport pybullet as p\nimport numpy as np\nimport time\n\nright_arm_park = {\"r_shoulder_pan_joint\" : -1.712,\n \"r_shoulder_lift_joint\" : -0.256,\n \"r_upper_arm_roll_joint\" : -1.463,\n \"r_elbow_flex_joint\" : -2.12,\n \"r_forearm_roll_joint\" : 1.766,\n \"r_wrist_flex_joint\" : -0.07,\n \"r_wrist_roll_joint\" : 0.051}\nleft_arm_park = {\"l_shoulder_pan_joint\" : 1.712,\n \"l_shoulder_lift_joint\" : -0.264,\n \"l_upper_arm_roll_joint\" : 1.38,\n \"l_elbow_flex_joint\" : -2.12,\n \"l_forearm_roll_joint\" : 16.996,\n \"l_wrist_flex_joint\" : -0.073}\nik_joints = [\"fl_caster_rotation_joint\", \"fl_caster_l_wheel_joint\", \"fl_caster_r_wheel_joint\",\n \"fr_caster_rotation_joint\", \"fr_caster_l_wheel_joint\", \"fr_caster_r_wheel_joint\",\n \"bl_caster_rotation_joint\", \"bl_caster_l_wheel_joint\", \"bl_caster_r_wheel_joint\",\n \"br_caster_rotation_joint\", \"br_caster_l_wheel_joint\", \"br_caster_r_wheel_joint\",\n \"head_pan_joint\", \"head_tilt_joint\", \"laser_tilt_mount_joint\", \"r_shoulder_pan_joint\",\n \"r_shoulder_lift_joint\", \"r_upper_arm_roll_joint\", \"r_elbow_flex_joint\",\n \"r_forearm_roll_joint\", \"r_wrist_flex_joint\", \"r_wrist_roll_joint\",\n \"r_gripper_motor_slider_joint\", \"r_gripper_motor_screw_joint\",\n 
\"r_gripper_l_finger_joint\", \"r_gripper_l_finger_tip_joint\",\n \"r_gripper_r_finger_joint\", \"r_gripper_r_finger_tip_joint\",\n \"r_gripper_joint\", \"l_shoulder_pan_joint\", \"l_shoulder_lift_joint\",\n \"l_upper_arm_roll_joint\", \"l_elbow_flex_joint\", \"l_forearm_roll_joint\",\n \"l_wrist_flex_joint\", \"l_wrist_roll_joint\", \"l_gripper_motor_slider_joint\",\n \"l_gripper_motor_screw_joint\", \"l_gripper_l_finger_joint\",\n \"l_gripper_l_finger_tip_joint\", \"l_gripper_r_finger_joint\",\n \"l_gripper_r_finger_tip_joint\", \"l_gripper_joint\", \"torso_lift_motor_screw_joint\"]\n\n\ndef _apply_ik(robot, joint_poses):\n \"\"\"\n Applies a list of joint poses calculated by an inverse kinematics solver to a robot\n :param robot: The robot the joint poses should be applied on\n :param joint_poses: The joint poses to be applied\n :return: None\n \"\"\"\n for i in range(0, len(ik_joints)):\n robot.set_joint_state(ik_joints[i], joint_poses[i])\n\n\ndef _park_arms(arm):\n \"\"\"\n Defines the joint poses for the parking positions of the arms of the PR2 and applies them to the, in the BulletWorld\n defined robot.\n :return:\n \"\"\"\n #joint_poses = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.9, -0.1, 1.6, 1.7,\n # 0.087, 1.2, -1.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.9, -0.1, 1.6,\n # -1.7, -0.08, -1.2, 1.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n robot = BulletWorld.robot\n if arm == \"right\":\n for joint, pose in right_arm_park.items():\n robot.set_joint_state(joint, pose)\n if arm == \"left\":\n for joint, pose in left_arm_park.items():\n robot.set_joint_state(joint, pose)\n\n\nclass Pr2Navigation(ProcessModule):\n \"\"\"\n The process module to move the robot from one position to another.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == 'navigate':\n robot = BulletWorld.robot\n robot.set_position_and_orientation(solution['target'], solution['orientation'])\n\nclass Pr2PickUp(ProcessModule):\n \"\"\"\n This process module is for picking up a given object.\n The object has to be reachable for this process module to succeed.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == 'pick':\n obj = solution['object']\n robot = BulletWorld.robot\n target = obj.prop_value(\"pose\")\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(solution['gripper']), target,\n maxNumIterations=100)\n _apply_ik(robot, inv)\n robot.attach(obj.prop_value(\"bullet_obj\"), solution['gripper'])\n time.sleep(0.5)\n\n\nclass Pr2Place(ProcessModule):\n \"\"\"\n This process module places an object at the given position in world coordinate frame.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == 'place':\n obj = solution['object']\n robot = BulletWorld.robot\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(solution['gripper']), solution['target'],\n maxNumIterations=100)\n _apply_ik(robot, inv)\n robot.detach(obj.prop_value(\"bullet_obj\"))\n time.sleep(0.5)\n\nclass PR2EnvironmentManipulation(ProcessModule):\n \"\"\"\n This process module responsible for opening and closing container to access the objects inside. This works by firstly moving\n the end effector to the handle of the container. 
Next, the end effector is moved the respective distance to the back.\n This provides the illusion the robot would open the drawer by himself.\n Then the drawer will be opened by setting the joint pose of the drawer joint.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n kitchen = solution['part-of']\n if type(kitchen) is str:\n kitchen = BulletWorld.current_bullet_world.get_objects_by_name(kitchen)[0]\n\n if solution['cmd'] == 'open-prismatic' or solution['cmd'] == 'close-prismatic':\n # kitchen = solution['part-of']\n robot = BulletWorld.robot\n gripper = solution['gripper']\n container_handle = solution['handle']\n container_joint = solution['joint']\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper),\n kitchen.get_link_position(container_handle))\n _apply_ik(robot, inv)\n time.sleep(0.2)\n handle_pose = kitchen.get_link_position(container_handle)\n if solution['cmd'] == 'open-prismatic':\n distance = solution['distance']\n print(\"Process module distance: \" + str(distance))\n new_pose = [handle_pose[0] - distance, handle_pose[1], handle_pose[2]]\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), new_pose)\n _apply_ik(robot, inv)\n kitchen.set_joint_state(container_joint, distance)\n elif solution['cmd'] == 'close-prismatic':\n distance = kitchen.get_joint_state(container_joint)\n new_pose = [handle_pose[0] + distance, handle_pose[1], handle_pose[2]]\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), new_pose)\n _apply_ik(robot, inv)\n kitchen.set_joint_state(container_joint, 0.0)\n time.sleep(0.2)\n\n if solution['cmd'] == \"open-rotational\":\n # kitchen = solution['part-of']\n robot = BulletWorld.robot\n gripper = solution['gripper']\n container_handle = solution['handle']\n container_joint = solution['joint']\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper),\n kitchen.get_link_position(container_handle))\n _apply_ik(robot, inv)\n time.sleep(0.2)\n distance = solution['distance']\n kitchen.set_joint_state(container_joint, distance)\n handle_pose = kitchen.get_link_position(container_handle)\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), handle_pose)\n _apply_ik(robot, inv)\n\n if solution['cmd'] == \"close-rotational\":\n # kitchen = solution['part-of']\n robot = BulletWorld.robot\n gripper = solution['gripper']\n container_handle = solution['handle']\n container_joint = solution['joint']\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper),\n kitchen.get_link_position(container_handle))\n _apply_ik(robot, inv)\n time.sleep(0.2)\n distance = 0.0\n kitchen.set_joint_state(container_joint, distance)\n handle_pose = kitchen.get_link_position(container_handle)\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), handle_pose)\n _apply_ik(robot, inv)\n time.sleep(0.2)\n\nclass Pr2ParkArms(ProcessModule):\n \"\"\"\n This process module is for moving the arms in a parking position.\n It is currently not used.\n \"\"\"\n def _execute(self, desig):\n solutions = desig.reference()\n if solutions['cmd'] == 'park':\n _park_arms()\n\n\nclass Pr2MoveHead(ProcessModule):\n \"\"\"\n This process module moves the head to look at a specific point in the world coordinate frame.\n This point can either be a position or an object.\n \"\"\"\n def _execute(self, desig):\n solutions = desig.reference()\n if solutions['cmd'] == 'looking':\n target = solutions['target']\n robot = BulletWorld.robot\n pose_in_pan = 
transform(target, robot.get_link_position(\"head_pan_link\"))\n pose_in_tilt = transform(target, robot.get_link_position(\"head_tilt_link\"))\n\n new_pan = np.arctan([pose_in_pan[1], pose_in_pan[0]])\n new_tilt = np.arctan([-pose_in_tilt[2], pose_in_tilt[0]**2 + pose_in_tilt[1]**2])\n\n robot.set_joint_state(\"head_pan_joint\", new_pan[0])\n robot.set_joint_state(\"head_tilt_joint\", new_tilt[0])\n\n\nclass Pr2MoveGripper(ProcessModule):\n \"\"\"\n This process module controls the gripper of the robot. They can either be opened or closed.\n Furthermore, it can only moved one gripper at a time.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"move-gripper\":\n robot = BulletWorld.robot\n gripper = solution['gripper']\n motion = solution['motion']\n robot.set_joint_state(\"r_gripper_l_finger_joint\" if gripper == 'right' else \"l_gripper_l_finger_joint\",\n 0 if motion == \"close\" else 0.548)\n robot.set_joint_state(\"r_gripper_r_finger_joint\" if gripper == 'right' else \"l_gripper_r_finger_joint\",\n 0 if motion == \"close\" else 0.548)\n time.sleep(0.5)\n\n\nclass Pr2Detecting(ProcessModule):\n \"\"\"\n This process module tries to detect an object with the given type. To be detected the object has to be in\n the field of view of the robot.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"detecting\":\n robot = BulletWorld.robot\n object_type = solution['object']\n cam_frame_name = solution['cam_frame']\n front_facing_axis = solution['front_facing_axis']\n\n objects = BulletWorld.current_bullet_world.get_objects_by_type(object_type)\n for obj in objects:\n if btr.visible(obj, robot.get_link_position_and_orientation(cam_frame_name), front_facing_axis):\n return obj\n\n\nclass Pr2MoveTCP(ProcessModule):\n \"\"\"\n This process module moves the tool center point of either the right or the left arm.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"move-tcp\":\n target = solution['target']\n gripper = solution['gripper']\n robot = BulletWorld.robot\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), target)\n _apply_ik(robot, inv)\n time.sleep(0.5)\n\n\nclass Pr2MoveJoints(ProcessModule):\n \"\"\"\n This process modules moves the joints of either the right or the left arm. 
The joint states can be given as\n list that should be applied or a pre-defined position can be used, such as \"parking\"\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"move-joints\":\n robot = BulletWorld.robot\n right_arm_poses = solution['right-poses']\n left_arm_poses = solution['left-poses']\n if type(right_arm_poses) == dict:\n for joint, pose in right_arm_poses.items():\n robot.set_joint_state(joint, pose)\n elif type(right_arm_poses) == str and right_arm_poses == \"park\":\n _park_arms(\"right\")\n\n if type(left_arm_poses) == dict:\n for joint, pose in left_arm_poses.items():\n robot.set_joint_state(joint, pose)\n elif type(left_arm_poses) == str and left_arm_poses == \"park\":\n _park_arms(\"left\")\n\n time.sleep(0.5)\n\n\nclass Pr2WorldStateDetecting(ProcessModule):\n \"\"\"\n This process module detectes an object even if it is not in the field of view of the robot.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"world-state-detecting\":\n obj_type = solution['object']\n return list(filter(lambda obj: obj.type == obj_type, BulletWorld.current_bullet_world.objects))[0]\n\n\npr2_navigation = Pr2Navigation()\npr2_pick_up = Pr2PickUp()\npr2_place = Pr2Place()\npr2_park_arms = Pr2ParkArms()\npr2_move_head = Pr2MoveHead()\npr2_move_gripper = Pr2MoveGripper()\npr2_detecting = Pr2Detecting()\npr2_move_tcp = Pr2MoveTCP()\npr2_move_joints = Pr2MoveJoints()\npr2_world_state_detecting = Pr2WorldStateDetecting()\npr2_environment_manipulation = PR2EnvironmentManipulation()\n\n\ndef available_process_modules(desig):\n \"\"\"\n This method chooses the right process module for the given designator and returns it.\n :param desig: The designator for which a process module should be choosen.\n :return: The choosen process module\n \"\"\"\n if desig.check_constraints([('type', 'moving')]):\n return pr2_navigation\n\n if desig.check_constraints([('type', 'pick-up')]):\n return pr2_pick_up\n\n if desig.check_constraints([('type', 'place')]):\n return pr2_place\n\n if desig.check_constraints([('type', 'park-arms')]):\n return pr2_park_arms\n\n if desig.check_constraints([('type', 'looking')]):\n return pr2_move_head\n\n if desig.check_constraints([('type', 'opening-gripper')]):\n return pr2_move_gripper\n\n if desig.check_constraints([('type', 'closing-gripper')]):\n return pr2_move_gripper\n\n if desig.check_constraints([('type', 'detecting')]):\n return pr2_detecting\n\n if desig.check_constraints([('type', 'move-tcp')]):\n return pr2_move_tcp\n\n if desig.check_constraints([('type', 'move-arm-joints')]):\n return pr2_move_joints\n\n if desig.check_constraints([('type', 'world-state-detecting')]):\n return pr2_world_state_detecting\n\n if desig.check_constraints([('type', 'opening-prismatic')]):\n return pr2_environment_manipulation\n\n if desig.check_constraints([('type', 'closing-prismatic')]):\n return pr2_environment_manipulation\n\n if desig.check_constraints([('type', 'opening-rotational')]):\n return pr2_environment_manipulation\n\n if desig.check_constraints([('type', 'closing-rotational')]):\n return pr2_environment_manipulation\n\n\nProcessModule.resolvers.append(available_process_modules)\n", "sub_path": "demos/pycram_tasktree_demo/scripts/process_modules.py", "file_name": "process_modules.py", "file_ext": "py", "file_size_in_byte": 15895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": 
"pycram.bullet_world.BulletWorld.robot", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 61, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 70, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 77, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 80, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 89, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 98, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 106, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 107, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 113, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world.get_objects_by_name", "line_number": 124, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 124, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 128, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 132, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 135, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 141, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 147, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 150, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 154, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 158, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 161, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 165, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 170, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 174, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 177, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 181, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 183, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 185, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 
196, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 205, "usage_type": "name"}, {"api_name": "pycram.helper.transform", "line_number": 206, "usage_type": "call"}, {"api_name": "pycram.helper.transform", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 210, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 216, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 224, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 231, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 234, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 242, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world.get_objects_by_type", "line_number": 247, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 247, "usage_type": "name"}, {"api_name": "pycram.bullet_world_reasoning.visible", "line_number": 249, "usage_type": "call"}, {"api_name": "pycram.bullet_world_reasoning", "line_number": 249, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 253, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 262, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 262, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 263, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 265, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 268, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 276, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 276, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 291, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 294, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 302, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule.resolvers.append", "line_number": 370, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule.resolvers", "line_number": 370, "usage_type": "attribute"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 370, "usage_type": "name"}]} +{"seq_id": "326359986", "text": "# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Define a template for a stateful process that produces metrics.\"\"\"\n\nimport attr\n\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.templates import iterative_process\n\n\n@attr.s(frozen=True, eq=False, slots=True)\nclass MeasuredProcessOutput:\n \"\"\"A structure containing the output of a `MeasuredProcess.next` computation.\n\n Attributes:\n state: A structure that will be passed to invocation of\n `MeasuredProcess.next`. Not intended for inspection externally, contains\n implementation details of the process.\n result: The result of the process given the current input and state. Using\n the rules of composition, either passed to input arguments of a chained\n `MeasuredProcess`, or concatenated with outputs of parallel\n `MeasuredProcess`es.\n measurements: Metrics derived from the computation of `result`. Intended for\n surfacing values to track the progress of a process that are not sent to\n chained `MeasuredProcess`es.\n \"\"\"\n state = attr.ib()\n result = attr.ib()\n measurements = attr.ib()\n\n# The type signature of the result of MeasuredProcess must be a named tuple\n# with the following names in the same order.\n_RESULT_FIELD_NAMES = [f.name for f in attr.fields(MeasuredProcessOutput)]\n\n\n# TODO(b/150384321): add method for performing the composition; current proposals\n# include a standalone `measure_process.compose(F, G)`, or implementing\n# `G.__call__(F)` to return a new MeasuredProcess.\nclass MeasuredProcess(iterative_process.IterativeProcess):\n \"\"\"A `tff.templates.IterativeProcess` with a specific output signature.\n\n A `tff.templates.MeasuredProcess` is a `tff.templates.IterativeProcess` that\n formalizes the output signature of the `next` property to be a named\n three-tuple `<state, result, measurements>`. This definition enables\n `tff.templates.MeasuredProcess` to be composed following the rules below,\n something that wasn't possible with the more generic, less defined\n `tff.templates.IterativeProcess`.\n\n *Rules of Composition*\n Given two `MeasuredProcess` _F(x)_ and _G(y)_, a new composition _C_ is\n also a `MeasuredProcess` where:\n - `C.state` is the concatenation `<F.state, G.state>`.\n - `C.result` is the result of _G_ applied to the result of\n _F_: `G(G.state, F(F.state, x).result).result`.\n - `C.measurements` is the concatenation `<F.measurements, G.measurements>`.\n\n The resulting composition _C_ would have the following type signatures:\n initialize: `( -> <F.state, G.state>)`\n next: `(<<F.state, G.state>, F.input> -> <state=<F.state, G.state>,\n result=G.result, measurements=<F.measurements, G.measurements>>)`\n \"\"\"\n\n def __init__(self, initialize_fn, next_fn):\n \"\"\"Creates a `tff.templates.MeasuredProcess`.\n\n Args:\n initialize_fn: A no-arg `tff.Computation` that creates the initial state\n of the measured process.\n next_fn: A `tff.Computation` that defines an iterated function.
If\n `initialize_fn` returns a type `S`, then `next_fn` must return a\n `MeasuredProcessOutput` where the `state` attribute matches the type\n `S`, and accept either a single argument of type `S` or multiple\n arguments where the first argument must be of type `S`.\n\n Raises:\n TypeError: `initialize_fn` and `next_fn` are not compatible function\n types, or `next_fn` does not return a `MeasuredProcessOutput`.\n \"\"\"\n super().__init__(initialize_fn, next_fn)\n next_result_type = next_fn.type_signature.result\n if not (isinstance(next_result_type, computation_types.StructWithPythonType)\n and next_result_type.python_container is MeasuredProcessOutput):\n raise TypeError(\n 'MeasuredProcess must return a MeasuredProcessOutput. Received a '\n '({t}): {s}'.format(\n t=type(next_fn.type_signature.result),\n s=next_fn.type_signature.result))\n\n @property\n def next(self):\n \"\"\"A `tff.Computation` that runs one iteration of the process.\n\n Its first argument should always be the current state (originally produced\n by `tff.templates.MeasuredProcess.initialize`), and the return type must be\n a `tff.templates.MeasuredProcessOutput`.\n\n Returns:\n A `tff.Computation`.\n \"\"\"\n return self._next_fn\n", "sub_path": "tensorflow_federated/python/core/templates/measured_process.py", "file_name": "measured_process.py", "file_ext": "py", "file_size_in_byte": 4912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "attr.ib", "line_number": 38, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 39, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 40, "usage_type": "call"}, {"api_name": "attr.s", "line_number": 22, "usage_type": "call"}, {"api_name": "attr.fields", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow_federated.python.core.templates.iterative_process.IterativeProcess", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.python.core.templates.iterative_process", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow_federated.python.core.api.computation_types.StructWithPythonType", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.python.core.api.computation_types", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "397885104", "text": "# -*- coding: utf-8 -*-\n\nimport torch\nimport os\nimport pickle\nimport argparse\nimport numpy as np\nimport time\n\nimport pdb\n\n\nfrom torch.optim import Adam\nfrom torch.utils.data import Dataset, DataLoader\nfrom model import Word2Vec, SGNS, Spell2Vec, load_spelling, load_model, SpellHybrid2Vec\nimport linecache\n\nnp.set_printoptions(precision=4, suppress = True)\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', type=str, default='sgns', help=\"model name\")\n parser.add_argument('--data_dir', type=str, help=\"data directory path\")\n parser.add_argument('--save_dir', type=str, help=\"model directory path\")\n parser.add_argument('--eval_dir', type=str, help=\"eval directory path\")\n parser.add_argument('--embedding_size', type=int, default=200, help=\"embedding dimension\")\n parser.add_argument('--model', action='store',type=str, choices=set(['Word2Vec', 'Spell2Vec', 'SpellHybrid2Vec']), default='Word2Vec', help=\"which model to use\")\n parser.add_argument('--num_neg_samples', type=int, default=5, help=\"number of negative samples\")\n parser.add_argument('--epoch', type=int, default=10, help=\"number of 
epochs\")\n parser.add_argument('--batch_size', type=int, default=2000, help=\"mini-batch size\")\n parser.add_argument('--subsample_threshold', type=float, default=10e-4, help=\"subsample threshold\")\n parser.add_argument('--use_noise_weights', action='store_true', help=\"use weights for negative sampling\")\n parser.add_argument('--window', action='store', type=int, default=5, help=\"context window size\")\n parser.add_argument('--max_vocab', action='store', type=int, default=50000, help='max vocab size for word-level embeddings')\n parser.add_argument('--gpuid', type=int, default=-1, help=\"which gpu to use\")\n #Spell2Vec properties\n parser.add_argument('--char_embedding_size', type=int, default=20, help=\"size of char embeddings\")\n parser.add_argument('--char_composition', type=str, default='RNN',\n help=\"char composition function type\",\n choices=set(['RNN', 'CNN']), required=False)\n parser.add_argument('--dropout', type=float, default=0.3, help='dropout for RNN and projection layer')\n return parser.parse_args()\n\ndef my_collate(batch):\n iwords, owords = zip(* batch)\n iwords = torch.LongTensor(np.concatenate(iwords))\n owords = torch.LongTensor(np.concatenate([ow for ow in owords if ow.size > 0]))\n #target = torch.LongTensor(target)\n return [iwords, owords] #, target]\n\nclass LazyTextDataset(Dataset):\n def __init__(self, corpus_file, word2idx_file, unigram_prob, window = 5, max_vocab=1e8):\n self.corpus_file = corpus_file\n self.unk = ''\n self.bos = ''\n self.eos = ''\n self.bow = ''\n self.eow = ''\n self.pad = ''\n self.word2idx = pickle.load(open(word2idx_file, 'rb'))\n self.max_vocab = max_vocab if max_vocab < len(self.word2idx) else len(self.word2idx)\n ss_t = 0.005 * unigram_prob[3]\n print('effective subsample threshold', ss_t)\n self.ss = 1.0 - np.sqrt(ss_t/ unigram_prob)\n self.ss[[0,1,2]] = 0.0\n self.ss = np.clip(self.ss, 0, 1)\n self._total_data = 0\n self.window = window\n with open(self.corpus_file, \"r\", encoding=\"utf-8\") as f:\n self._total_data = len(f.readlines()) - 1\n self._total_data = 10\n\n\n def skipgram_instances(self, sentence):\n sentence = sentence.strip().split()\n if len(sentence) > 160:\n f = 80.0 / float(len(sentence))\n sentence= [s for s in sentence if np.random.rand() < f]\n iwords = []\n contexts = []\n s_idxs = [self.word2idx[word] \\\n if self.word2idx[word] < self.max_vocab else self.word2idx[self.unk] \\\n for word in sentence \\\n if (word in self.word2idx and self.ss[self.word2idx[word]] < np.random.rand())]\n if len(s_idxs) < 1:\n s_idxs= [self.word2idx[word] \\\n if self.word2idx[word] < self.max_vocab else self.word2idx[self.unk] \\\n for word in sentence \\\n if word in self.word2idx]\n #rands = np.random.rand(len(sentence))\n for i,iword in enumerate(s_idxs):\n #left = [l for l_idx,l in enumerate(s_idxs[:i],0) if self.ss[l] < rands[l_idx]][:self.window]\n left = s_idxs[max(i - self.window, 0): i]\n #right = [r for r_idx,r in enumerate(s_idxs[i+1:],i+1) if self.ss[r] < rands[r_idx]][:self.window]\n right = s_idxs[i + 1: i + 1 + self.window]\n bos_fill = [self.word2idx[self.bos]] * (self.window - len(left))\n eos_fill = [self.word2idx[self.eos]] * (self.window - len(right))\n context = bos_fill + left + right + eos_fill\n iwords.append(iword)\n contexts.append(context)\n return iwords, contexts\n\n def __getitem__(self, idx):\n line = linecache.getline(self.corpus_file, idx + 1)\n iwords, owords = self.skipgram_instances(line)\n iw, ows = np.array(list(iwords)), np.array(list(owords))\n return iw, ows\n\n def 
__len__(self):\n return self._total_data\n\n\ndef train(args):\n if args.gpuid > -1:\n torch.cuda.set_device(args.gpuid)\n tmp = torch.ByteTensor([0])\n torch.backends.cudnn.enabled = True\n tmp.cuda()\n print(\"using GPU\", args.gpuid)\n print('CUDNN VERSION', torch.backends.cudnn.version())\n else:\n print(\"using CPU\")\n idx2unigram_prob = pickle.load(open(os.path.join(args.data_dir, 'idx2unigram_prob.pkl'), 'rb'))\n idx, unigram_prob = zip(*sorted([(idx, p) for idx, p in idx2unigram_prob.items()]))\n unigram_prob = np.array(unigram_prob)\n if args.use_noise_weights:\n noise_unigram_prob = unigram_prob[:args.max_vocab] ** 0.75\n noise_unigram_prob = noise_unigram_prob / noise_unigram_prob.sum()\n else:\n noise_unigram_prob = None\n if args.model == 'Word2Vec':\n embedding_model = Word2Vec(word_vocab_size=args.max_vocab, embedding_size=args.embedding_size)\n elif args.model == 'Spell2Vec':\n char2idx = pickle.load(open(os.path.join(args.data_dir, 'char2idx.pkl'), 'rb'))\n wordidx2spelling, vocab_size, max_spelling_len = load_spelling(\n os.path.join(args.data_dir, 'wordidx2charidx.pkl'),\n )\n embedding_model = Spell2Vec(wordidx2spelling,\n word_vocab_size=args.max_vocab,\n noise_vocab_size=args.max_vocab, # len(noise_weights) if noise_weights is not None else 20000,\n char_vocab_size=len(char2idx),\n embedding_size=args.embedding_size,\n char_embedding_size=args.char_embedding_size,\n dropout=args.dropout,\n char_composition=args.char_composition,\n bidirectional=True)\n elif args.model == 'SpellHybrid2Vec':\n char2idx = pickle.load(open(os.path.join(args.data_dir, 'char2idx.pkl'), 'rb'))\n wordidx2spelling, vocab_size, max_spelling_len = load_spelling(\n os.path.join(args.data_dir, 'wordidx2charidx.pkl'),\n )\n embedding_model = SpellHybrid2Vec(wordidx2spelling,\n word_vocab_size=args.max_vocab,\n noise_vocab_size=args.max_vocab, # len(noise_weights) if noise_weights is not None else 20000,\n char_vocab_size=len(char2idx),\n embedding_size=args.embedding_size,\n char_embedding_size=args.char_embedding_size,\n dropout=args.dropout,\n char_composition=args.char_composition,\n bidirectional=True)\n\n else:\n raise NotImplementedError('unknown embedding model')\n dataset = LazyTextDataset(corpus_file=os.path.join(args.data_dir, 'corpus.txt'),\n word2idx_file=os.path.join(args.data_dir, 'word2idx.pkl'),\n unigram_prob=unigram_prob,\n window=args.window,\n max_vocab=args.max_vocab if args.model == 'Word2Vec' else 1e8)\n dataloader = DataLoader(dataset=dataset,\n batch_size=args.batch_size,\n shuffle=True,\n collate_fn=my_collate)\n total_batches = int(np.ceil(len(dataset) / args.batch_size))\n sgns = SGNS(embedding_model=embedding_model, num_neg_samples=args.num_neg_samples, weights=noise_unigram_prob)\n optim = Adam(sgns.parameters()) # , lr = 0.5)\n if args.gpuid > -1:\n sgns.init_cuda()\n\n if not os.path.isdir(args.save_dir):\n os.mkdir(args.save_dir)\n print(sgns)\n for epoch in range(1, args.epoch + 1):\n ave_time = 0.\n s = time.time()\n for batch_idx, batch in enumerate(dataloader):\n iword, owords = batch\n nwords = sgns.sample_noise(iword.size()[0])\n loss = sgns(iword, owords, nwords)\n optim.zero_grad()\n loss.backward()\n optim.step()\n if batch_idx % 10 == 0 and batch_idx > 0:\n e = time.time()\n ave_time = (e - s) / 10.\n s = time.time()\n print(\"e{:d} b{:5d}/{:5d} loss:{:7.4f} ave_time:{:7.4f}\\r\".format(epoch,\n batch_idx + 1,\n total_batches,\n loss.data[0],\n ave_time))\n path = args.save_dir + '/' + embedding_model.__class__.__name__ + 
'_e{:d}_loss{:.4f}'.format(epoch,\n loss.data[0])\n embedding_model.save_model(path)\n if args.eval_dir != '':\n eval_vecs = open(os.path.join(args.save_dir, 'vocab_vec.txt'), 'w', encoding='utf-8')\n eval_vocab = [ev.strip() for ev in\n open(os.path.join(args.eval_dir, 'fullVocab.txt'), 'r', encoding='utf-8').readlines()]\n word2idx = dataset.word2idx\n char2idx = pickle.load(open(os.path.join(args.data_dir, 'char2idx.pkl'), 'rb'))\n for ev in eval_vocab:\n ev_id = word2idx.get(ev, word2idx['<unk>'])\n if isinstance(embedding_model, Word2Vec):\n ev_id = ev_id if args.max_vocab > ev_id else word2idx['<unk>']\n vec = embedding_model.query(ev_id)\n else:\n ev_id = ev_id if args.max_vocab > ev_id else word2idx['<unk>']\n spelling = [char2idx['<bow>']] + [char2idx.get(i, char2idx['<unk>']) for i in ev] + [char2idx['<eow>']]\n spelling = spelling + [char2idx['<pad>']] * (max_spelling_len - len(spelling))\n vec = embedding_model.query(ev_id, spelling)\n vec = ','.join(['%4f' % i for i in vec.flatten()])\n eval_vecs.write(ev + ' ' + vec + '\\n')\n eval_vecs.close()\n\n\nif __name__ == '__main__':\n print(parse_args())\n train(parse_args())\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 11732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.set_printoptions", "line_number": 18, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 50, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "linecache.getline", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.cuda.set_device", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.ByteTensor", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.version", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "model.Word2Vec", "line_number": 131, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number":
133, "usage_type": "attribute"}, {"api_name": "model.load_spelling", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "model.Spell2Vec", "line_number": 137, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "model.load_spelling", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "model.SpellHybrid2Vec", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 172, "usage_type": "call"}, {"api_name": "model.SGNS", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "model.Word2Vec", "line_number": 211, "usage_type": "argument"}]} +{"seq_id": "589934505", "text": "import numpy as np\nimport scipy.misc as m\nimport keras\n\nclass DataGen(keras.utils.Sequence):\n\n def __init__(self, data_lists=None, paths=None, window=-1, batch_size=256, n_outputs=2, shuffle=True, mode='RGB', load_all=True, imsize=[-1,-1], sampling=-1.0):\n # data_lists <- dict. List of names of the images, ground truth and (optional) masks.\n # paths <- dict. Paths to images, ground truth and (optional) masks\n # window <- int. Window size\n # load_all <- bool. 
True if all of the data can be loaded into memory\n self.images_list = data_lists['images']\n self.gt_list = data_lists['gt']\n self.mask_list = data_lists.get('mask')\n self.path_X = paths[\"images\"]\n self.path_y = paths[\"gt\"]\n self.path_mask = paths.get(\"mask\")\n self.window_side = int(window/2)\n self.batch_size = batch_size\n self.n_outputs = n_outputs\n self.imsize = imsize\n self.sampling = sampling\n self.mask_id = self.get_mask_id(self.mask_list)\n self.mode = mode\n self.load_all = load_all\n if self.load_all:\n self.data = self.load_data(self.images_list, self.gt_list)\n self.on_epoch_end()\n\n def __len__(self):\n return int(np.floor(len(self.mask_id)/self.batch_size))\n\n def __getitem__(self, index):\n ind = self.mask_id[index*self.batch_size:(index+1)*self.batch_size]\n if self.load_all:\n X,y = self.get_batch(ind.astype(int))\n else:\n X,y = self.datagen(ind.astype(int))\n return X,y\n\n def on_epoch_end(self):\n p = np.random.permutation(len(self.mask_id))\n self.mask_id = self.mask_id[p]\n\n def datagen(self, indices):\n X = []\n y = []\n ind = indices[indices[:,2].argsort()] #sort indices by image index.\n previous_img = -1\n win = self.window_side\n for i,j,id_img in ind:\n if id_img != previous_img:\n previous_img = id_img\n # Special case when only reading the green channel\n if self.mode == 'G':\n img = m.imread(self.path_X+self.images_list[id_img], mode='RGB')\n img = img[:,:,1]\n else:\n img = m.imread(self.path_X+self.images_list[id_img], mode=self.mode)\n gt = m.imread(self.path_y+self.gt_list[id_img], mode='L')\n\n H, W, *channels = img.shape\n # If i and j are within the image field\n if i - win > 0 and i + win + 1 < H and j - win > 0 and j + win + 1 < W:\n X.append(img[i-win:i+win+1, j-win:j+win+1])\n y.append(gt[i,j])\n\n X = np.asarray(X)\n if len(X.shape) == 3:\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n if self.n_outputs == 2:\n y = (np.asarray(y) > 0).astype(int)\n y = keras.utils.to_categorical(y, self.n_outputs)\n y = np.reshape(y, (y.shape[0], 1, 1, y.shape[1]))\n return X, y\n\n def load_data(self, images_list, gt_list):\n imgs = []\n labels = []\n for img_name, gt_name in zip(images_list, gt_list):\n if self.mode == 'G':\n img = m.imread(self.path_X+img_name, mode='RGB')\n img = img[:,:,1]\n else:\n img = m.imread(self.path_X+img_name, mode=self.mode)\n gt = m.imread(self.path_y+gt_name, mode='L')\n imgs.append(img)\n labels.append(gt)\n return imgs, labels\n\n def get_batch(self, ind):\n X = []\n y = []\n win = self.window_side\n for i, j, id_img in ind:\n H, W, *channels = self.data[0][id_img].shape\n if i - win > 0 and i + win + 1 < H and j - win > 0 and j + win + 1 < W:\n X.append(self.data[0][id_img][i-win:i+win+1, j-win:j+win+1])\n y.append(self.data[1][id_img][i,j])\n X = np.asarray(X)\n if len(X.shape) == 3:\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n if self.n_outputs == 2:\n y = (np.asarray(y) > 0).astype(int)\n y = keras.utils.to_categorical(y, self.n_outputs)\n y = np.reshape(y, (y.shape[0], 1, 1, y.shape[1]))\n return X, y\n\n def get_mask_id(self, mask_list):\n # Masks can be used to define pixels to be extracted\n win = self.window_side\n mask_id = np.array([]).reshape(0,3)\n n = len(self.images_list)\n\n # If there is a mask for each image\n if mask_list:\n for i, mask in enumerate(mask_list):\n image = m.imread(self.path_mask+mask, mode='L')\n idx, idy = np.nonzero(image[win:-win, win:-win])\n maskid = np.ones(len(idx), dtype=int)*i\n mask_id = np.concatenate((mask_id, 
np.dstack((idx,idy,maskid))[0]))\n else:\n # If images are the same size, we can easily create mask_ids\n if self.imsize[0] > 0:\n H, W = self.imsize\n mask_id = np.array(np.meshgrid(np.arange(H), np.arange(W), np.arange(n))).T.reshape(-1,3)\n\n # Otherwise, we need to read every image and find their size\n else:\n for i, img in enumerate(self.images_list):\n image = m.imread(self.path_X+img, mode='L')\n H, W, *C = image.shape\n ids = [[h,w,i] for h in range(H) for w in range(W)]\n mask_id = np.concatenate((mask_id, np.array(ids)))\n\n if self.sampling > 0:\n n_ids = np.floor(len(mask_id) * self.sampling)\n mask_id = mask_id[:int(n_ids)]\n return mask_id\n\nclass DataGenPatches(DataGen):\n\n def __init__(self, l_patch, s_patch, **kw):\n self.l_patch = l_patch\n self.s_patch = s_patch\n DataGen.__init__(self, **kw)\n\n def get_mask_id(self, mask_list):\n l_patch = self.l_patch\n s_patch = self.s_patch\n mask_id = np.array([]).reshape(0,3)\n n = len(self.images_list)\n\n if self.imsize[0] > 0:\n H, W = self.imsize\n px = np.arange(l_patch//2+1, H - l_patch//2, s_patch)\n py = np.arange(l_patch//2+1, W - l_patch//2, s_patch)\n pz = np.arange(n)\n mask_id = np.array(np.meshgrid(px, py, pz)).T.reshape(-1,3)\n else:\n for i, img in enumerate(self.images_list):\n image = m.imread(self.path_X+img, mode='L')\n H, W, *C = image.shape\n px = np.arange(l_patch//2+1, H - l_patch//2, s_patch)\n py = np.arange(l_patch//2+1, W - l_patch//2, s_patch)\n ids = np.array(np.meshgrid(px, py, i)).T.reshape(-1,3)\n mask_id = np.concatenate((mask_id, np.array(ids)))\n\n if self.sampling > 0:\n n_ids = int(len(mask_id) * self.sampling)\n mask_id = mask_id[:n_ids]\n\n return mask_id\n\n def datagen(self, indices):\n X = []\n y = []\n ind = indices[indices[:,2].argsort()] #sort indices by image index.\n hl_patch_beg = self.l_patch//2\n hl_patch_end = self.l_patch//2 + self.l_patch%2\n hs_patch_beg = self.s_patch//2\n hs_patch_end = self.s_patch//2 + self.s_patch%2\n previous_img = -1\n for i,j,id_img in ind:\n if id_img != previous_img:\n previous_img = id_img\n # Special case when only reading the green channel\n if self.mode == 'G':\n img = m.imread(self.path_X+self.images_list[id_img], mode='RGB')\n img = img[:,:,1]\n else:\n img = m.imread(self.path_X+self.images_list[id_img], mode=self.mode)\n gt = m.imread(self.path_y+self.gt_list[id_img], mode='L')\n\n H, W, *channels = img.shape\n # If i and j are within the image field\n if i - hl_patch_beg > 0 and i + hl_patch_end < H and j - hl_patch_beg > 0 and j + hl_patch_end < W:\n X.append(img[i-hl_patch_beg:i+hl_patch_end, j-hl_patch_beg:j+hl_patch_end])\n y.append(gt[i-hs_patch_beg:i+hs_patch_end, j-hs_patch_beg:j+hs_patch_end])\n\n X = np.asarray(X)\n X = X.astype('float32')/255.0\n if len(X.shape) == 3:\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n if self.n_outputs == 2:\n y = (np.asarray(y) > 0).astype(int)\n y = keras.utils.to_categorical(y, self.n_outputs)\n# y = np.reshape(y, (y.shape[0], s_patch, s_patch, y.shape[1]))\n return X, y\n\n", "sub_path": "utils/datagen.py", "file_name": "datagen.py", "file_ext": "py", "file_size_in_byte": 8561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.utils", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 42, "usage_type": "attribute"},
{"api_name": "scipy.misc.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 56, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 59, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 82, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 82, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 85, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 118, "usage_type": "name"}, {"api_name": "numpy.nonzero", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 126, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 159, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 189, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 189, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 192, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 192, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 193, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 206, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 207, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 207, "usage_type": "attribute"}]} +{"seq_id": "202263342", "text": "from __future__ import annotations\n\nimport requests\n\nfrom .powermeter import PowerMeasurementResult, PowerMeter\n\n\nclass ShellyPowerMeter(PowerMeter):\n def __init__(self, shelly_ip):\n self.meter_uri = \"http://{}/status/\".format(shelly_ip)\n\n def get_power(self) -> PowerMeasurementResult:\n r = requests.get(self.meter_uri, timeout=5)\n json = r.json()\n return PowerMeasurementResult(\n float(json[\"meters\"][0][\"power\"]),\n float(json[\"meters\"][0][\"timestamp\"]),\n )\n", "sub_path": "utils/measure_v2/powermeter/shelly.py", "file_name": "shelly.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "powermeter.PowerMeter", "line_number": 8, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "powermeter.PowerMeasurementResult", "line_number": 15, "usage_type": "call"}, {"api_name": "powermeter.PowerMeasurementResult", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "167763421", "text": "import asyncio\nimport websockets\n\ncmdCon = 'USB CONNECTED'\ncmdDis = 'USB DISCONNECTED'\ncmdUnplug = 'unplug'\nuriServer = \"ws://localhost:8765\"\n\nasync def main():\n async with websockets.connect(uriServer) as websocket:\n while True:\n try:\n await websocket.send(cmdCon)\n income = await websocket.recv()\n if income == cmdUnplug:\n await websocket.send(cmdDis)\n exit()\n except websockets.ConnectionClosed:\n print(f\"Connection closed\")\n break\n\nasyncio.get_event_loop().run_until_complete(main())\nasyncio.get_event_loop().run_forever()", "sub_path": "ws/utils/python/connect.py", "file_name": "connect.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "websockets.connect", "line_number": 10, "usage_type": "call"}, {"api_name": "websockets.ConnectionClosed", "line_number": 18, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 22, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "299887841", "text": "import logging\nfrom datetime import datetime\nfrom textwrap import shorten\n\nimport requests\nfrom django import 
forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils import Choices\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom taggit.managers import TaggableManager\nfrom taggit.models import TaggedItemBase\nfrom wagtail.wagtailadmin.edit_handlers import (FieldPanel, InlinePanel, PageChooserPanel, MultiFieldPanel,\n StreamFieldPanel)\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.fields import RichTextField, StreamField\nfrom wagtail.wagtailcore.models import Page, Orderable\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom core.fields import MarkdownField\nfrom core.models import MemberProfile, Platform, Event, Job\nfrom core.utils import get_canonical_image\nfrom home.forms import ContactForm\nfrom library.models import Codebase\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nWagtail Page models and related supporting Models and Settings\n\n\"\"\"\n\n\nclass UserMessage(models.Model):\n \"\"\"\n FIXME: consider removing this class, use email for messaging.\n \"\"\"\n user = models.ForeignKey(User, related_name='inbox')\n sender = models.ForeignKey(User, related_name='outbox')\n message = models.CharField(max_length=512)\n date_created = models.DateTimeField(auto_now_add=True)\n read_on = models.DateTimeField(null=True, blank=True)\n\n def is_read(self):\n return self.read_on is not None\n\n\nclass LinkFields(models.Model):\n \"\"\"\n Cribbed from github.com/wagtail/wagtaildemo\n \"\"\"\n link_external = models.URLField(\"External link\", blank=True)\n link_page = models.ForeignKey(\n Page,\n null=True,\n blank=True,\n related_name='+'\n )\n link_codebase = models.ForeignKey(\n 'library.Codebase',\n null=True,\n blank=True,\n related_name='+'\n )\n\n @property\n def link(self):\n if self.link_page:\n return self.link_page.url\n elif self.link_codebase:\n return self.link_codebase.get_absolute_url()\n else:\n return self.link_external\n\n panels = [\n FieldPanel('link_external'),\n PageChooserPanel('link_page'),\n # figure out how to manually link codebase / events / jobs into FeaturedContentItem\n # CodebaseChooserPanel('link_codebase'),\n ]\n\n class Meta:\n abstract = True\n\n\nclass CarouselItem(LinkFields):\n image = models.ForeignKey('wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n codebase_image = models.ForeignKey('library.CodebaseImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n embed_url = models.URLField(\"Embed URL\", blank=True)\n caption = models.CharField(max_length=255)\n summary = models.TextField(max_length=600, blank=True)\n title = models.CharField(max_length=255)\n panels = [\n ImageChooserPanel('image'),\n ImageChooserPanel('codebase_image'),\n FieldPanel('embed_url'),\n FieldPanel('caption'),\n FieldPanel('title'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n @property\n def featured_image(self):\n if self.image:\n return self.image\n elif self.codebase_image:\n return self.codebase_image\n return 
None\n\n class Meta:\n abstract = True\n\n\nclass FeaturedContentItem(Orderable, CarouselItem):\n page = ParentalKey('home.LandingPage', related_name='featured_content_queue')\n\n\nclass LandingPage(Page):\n template = 'home/index.jinja'\n FEATURED_CONTENT_COUNT = 6\n MAX_CALLOUT_ENTRIES = 3\n RECENT_FORUM_ACTIVITY_COUNT = 5\n\n mission_statement = models.CharField(max_length=512)\n community_statement = models.TextField()\n\n def get_featured_content(self):\n return self.featured_content_queue.all()[:self.FEATURED_CONTENT_COUNT]\n\n def get_recent_forum_activity(self):\n # FIXME: move to dedicated discourse module / api as we integrate more tightly with discourse\n # Discourse API endpoint documented at http://docs.discourse.org/#tag/Topics%2Fpaths%2F~1latest.json%2Fget\n if settings.DEBUG:\n random_submitters = User.objects.filter(pk__in=(3, 5, 7, 11, 13, 17))\n return [\n {\n 'title': \"Generated Forum Topic {}\".format(i),\n 'submitter': random_submitters[i],\n 'date_created': datetime.now(),\n 'url': \"https://forum.example.com/topic/{}\".format(i),\n }\n for i in range(self.RECENT_FORUM_ACTIVITY_COUNT)\n ]\n\n # FIXME: refactor and clean up logic, extract to a sensible discourse api\n r = requests.get('{0}/{1}'.format(settings.DISCOURSE_BASE_URL, 'latest.json'),\n params={'order': 'created', 'sort': 'asc'})\n posts_dict = r.json()\n topics = posts_dict['topic_list']['topics']\n recent_forum_activity = cache.get('recent_forum_activity')\n if recent_forum_activity:\n return recent_forum_activity\n # transform topics list of dictionaries into web template format with title, submitter, date_created, and url.\n\n recent_forum_activity = []\n # stuff this in the Redis Cache.\n for topic in topics[:self.RECENT_FORUM_ACTIVITY_COUNT]:\n topic_title = topic['title']\n topic_url = '{0}/t/{1}/{2}'.format(settings.DISCOURSE_BASE_URL,\n topic['slug'],\n topic['id'])\n # getting back to the original submitter will involve some trickery.\n # The Discourse embed Javascript queues up a crawler to hit the given page and parses it for content to use\n # as the initial topic text. However, this topic gets added as a specific Discourse User (`comses`,\n # see https://meta.discourse.org/t/embedding-discourse-comments-via-javascript/31963/150 for more details)\n # and so we won't always have the direct username of the submitter without looking it up by\n # 1. Discourse category_id (6 = jobs & appointments, 7 = events, 8 = codebase)\n # 2. 
Title (not guaranteed to be unique)\n\n            last_poster_username = topic['last_poster_username']\n            submitter = None\n            submitter_url = None\n            if last_poster_username == 'comses':\n                category_id = topic['category_id']\n                logger.debug(\"category id: %s, topic title: %s, topic: %s\", category_id, topic_title, topic)\n                # special case lookup for real submitter\n                # FIXME: get rid of magic constants\n                target_object = None\n                if category_id == 6:\n                    # jobs and appointments\n                    target_object = Job.objects.filter(title=topic_title).order_by('-date_created').first()\n                elif category_id == 7:\n                    # events\n                    target_object = Event.objects.filter(title=topic_title).order_by('-date_created').first()\n                elif category_id == 8:\n                    target_object = Codebase.objects.filter(title=topic_title).order_by('-date_created').first()\n                if target_object:\n                    submitter = target_object.submitter\n                    submitter_url = submitter.member_profile.get_absolute_url()\n                else:\n                    submitter = User.objects.get(username='AnonymousUser')\n            else:\n                try:\n                    submitter = User.objects.get(username=last_poster_username)\n                except User.DoesNotExist:\n                    submitter = User.objects.get(username='AnonymousUser')\n            recent_forum_activity.append(\n                {\n                    'title': topic_title,\n                    'submitter_name': submitter.username,\n                    'submitter_url': submitter_url,\n                    # FIXME: handle created_at=None gracefully, via default date?\n                    'date_created': datetime.strptime(topic.get('created_at'), \"%Y-%m-%dT%H:%M:%S.%fZ\"),\n                    'url': topic_url,\n                }\n            )\n        cache.set('recent_forum_activity', recent_forum_activity, 3600)\n        return recent_forum_activity\n\n    def get_latest_jobs(self):\n        return Job.objects.order_by('-date_created')[:self.MAX_CALLOUT_ENTRIES]\n\n    def get_upcoming_events(self):\n        return Event.objects.upcoming().order_by('start_date')[:self.MAX_CALLOUT_ENTRIES]\n\n    def get_context(self, request, *args, **kwargs):\n        context = super(LandingPage, self).get_context(request, *args, **kwargs)\n        context['featured_content'] = self.get_featured_content()\n        context['recent_forum_activity'] = self.get_recent_forum_activity()\n        context['latest_jobs'] = self.get_latest_jobs()\n        context['upcoming_events'] = self.get_upcoming_events()\n        return context\n\n    content_panels = Page.content_panels + [\n        FieldPanel('mission_statement', widget=forms.Textarea),\n        FieldPanel('community_statement'),\n        InlinePanel('featured_content_queue', label=_('Featured Content')),\n    ]\n\n\nclass CategoryIndexItem(Orderable, models.Model):\n    page = ParentalKey('home.CategoryIndexPage', related_name='callouts')\n    image = models.ForeignKey('wagtailimages.Image',\n                              null=True,\n                              blank=True,\n                              on_delete=models.SET_NULL,\n                              related_name='+')\n    url = models.CharField(\"Relative path, absolute path, or URL\", max_length=200, blank=True)\n    title = models.CharField(max_length=255)\n    caption = models.CharField(max_length=600)\n\n    def __str__(self):\n        return \"{0} {1}\".format(self.title, self.url)\n\n\nclass SubnavigationMenu():\n    pass\n\n\nclass SubNavigationLink(Orderable, models.Model):\n    page = ParentalKey(Page, related_name='navigation_links')\n    url = models.CharField(\"Relative path, absolute path, or full URL\", max_length=255)\n    title = models.CharField(max_length=128)\n\n\nclass Breadcrumb(Orderable, models.Model):\n    page = ParentalKey(Page, related_name='breadcrumbs')\n    url = models.CharField(\"Relative / absolute path or full URL\", max_length=255, blank=True)\n    title = models.CharField(max_length=255)\n\n    def __str__(self):\n        return '{0}: {1}'.format(self.title, self.url)\n\n\nclass NavigationMixin(object):\n    def add_breadcrumbs(self, breadcrumb_tuples):\n
self._add_tuples(breadcrumb_tuples, Breadcrumb)\n\n def get_breadcrumbs(self):\n return [\n {'url': item.url, 'text': item.title}\n for item in self.breadcrumbs.all()\n ]\n\n def _add_tuples(self, tuples, cls):\n related_name = cls._meta.get_field('page').related_query_name()\n related_manager = getattr(self, related_name)\n for idx, (title, url) in enumerate(tuples):\n related_manager.add(\n cls(title=title, url=url, sort_order=idx)\n )\n\n def add_navigation_links(self, navigation_tuples):\n \"\"\"\n Takes an ordered list of tuples and adds them as navigation links.\n :param navigation_tuples:\n :return:\n \"\"\"\n self._add_tuples(navigation_tuples, SubNavigationLink)\n\n def get_navigation_links(self):\n \"\"\"\n Returns a nested dict for use by the subnav Jinja2 tag.\n :return:\n \"\"\"\n return [\n {'url': nav.url, 'text': nav.title, 'active': nav.url.endswith(self.slug + '/')}\n for nav in self.navigation_links.all()\n ]\n\n\nclass CategoryIndexPage(NavigationMixin, Page):\n template = models.CharField(max_length=128, default='home/category_index.jinja')\n heading = models.CharField(max_length=128, help_text=_(\"Short name to be placed in introduction header.\"))\n summary = models.CharField(max_length=1000, help_text=_('Summary blurb for this category index page.'))\n\n def add_callout(self, image_path, title, caption, sort_order=None, user=None, url=''):\n if user is None:\n user = User.objects.get(username='alee')\n _image = get_canonical_image(title=title, path=image_path, user=user)\n self.callouts.add(\n CategoryIndexItem(\n title=title,\n sort_order=sort_order,\n caption=caption,\n image=_image,\n url=url,\n )\n )\n\n content_panels = Page.content_panels + [\n # don't expose template to web form for now, could wreak havoc\n FieldPanel('heading'),\n FieldPanel('template'),\n FieldPanel('summary', widget=forms.Textarea),\n InlinePanel('callouts', label=_('Captioned Image Callouts')),\n InlinePanel('navigation_links', label=_('Subnavigation Links')),\n ]\n\n search_fields = Page.search_fields + [\n index.SearchField('summary')\n ]\n\n\nclass StreamPage(Page, NavigationMixin):\n template = models.CharField(max_length=128, default='home/stream_page.jinja')\n date = models.DateField(\"Post date\", default=timezone.now)\n description = models.CharField(max_length=512, blank=True)\n\n body = StreamField([\n ('heading', blocks.CharBlock(classname='full title')),\n ('paragraph', blocks.RichTextBlock()),\n ('image', ImageChooserBlock()),\n ('url', blocks.URLBlock(required=False))\n ])\n\n content_panels = Page.content_panels + [\n FieldPanel('date'),\n FieldPanel('description'),\n StreamFieldPanel('body'),\n ]\n\n\nclass MarkdownPage(NavigationMixin, Page):\n template = models.CharField(max_length=128, default='home/markdown_page.jinja')\n heading = models.CharField(max_length=128, blank=True)\n date = models.DateField(\"Post date\", default=timezone.now)\n description = MarkdownField(max_length=512, blank=True)\n body = MarkdownField(blank=True)\n jumbotron = models.BooleanField(\n default=True,\n help_text=_(\"True if this page should display its title and description in a jumbotron\"))\n\n content_panels = Page.content_panels + [\n FieldPanel('heading'),\n FieldPanel('date'),\n FieldPanel('description'),\n FieldPanel('body'),\n ]\n\n search_fields = Page.search_fields + [\n index.SearchField('date'),\n index.SearchField('description'),\n index.SearchField('body')\n ]\n\n\nclass ContactPage(NavigationMixin, Page):\n template = 'home/about/contact.jinja'\n description = 
models.CharField(max_length=512, blank=True)\n\n    def serve(self, request):\n        if request.method == 'POST':\n            form = ContactForm(request=request, data=request.POST)\n            if form.is_valid():\n                form.save()\n                return redirect('home:contact-sent')\n        else:\n            form = ContactForm(request)\n\n        return render(request, self.template, {\n            'page': self,\n            'form': form,\n        })\n\n    content_panels = Page.content_panels + [\n        FieldPanel('description')\n    ]\n\n\nclass PlatformSnippetPlacement(Orderable, models.Model):\n    page = ParentalKey('home.PlatformIndexPage', related_name='platform_placements')\n    platform = models.ForeignKey(Platform, related_name='+')\n\n    class Meta:\n        verbose_name = 'platform placement'\n        verbose_name_plural = 'platform placements'\n\n    panels = [\n        SnippetChooserPanel('platform'),\n    ]\n\n    def __str__(self):\n        return \"Snippet placement for {0}\".format(self.platform.name)\n\n\nclass PlatformIndexPage(NavigationMixin, Page):\n    template = 'home/resources/platforms.jinja'\n    description = models.TextField(blank=True)\n\n    content_panels = Page.content_panels + [\n        FieldPanel('description'),\n        InlinePanel('platform_placements', label='Platforms'),\n    ]\n\n    def get_platforms(self):\n        # highlight featured platforms? allow the community to rank them.\n        return self.platform_placements.all()\n\n    def get_context(self, request):\n        context = super().get_context(request)\n        # FIXME: add pagination\n        context['platforms'] = self.get_platforms()\n        return context\n\n\nclass JournalTag(TaggedItemBase):\n    content_object = ParentalKey('home.Journal', related_name='tagged_journals')\n\n\n@register_snippet\nclass Journal(index.Indexed, ClusterableModel):\n    name = models.CharField(max_length=255)\n    url = models.URLField()\n    issn = models.CharField(max_length=16, blank=True, help_text=_(\"Linking ISSN-L for this Journal\"))\n    description = models.CharField(max_length=1000)\n    tags = TaggableManager(through=JournalTag, blank=True)\n\n    panels = [\n        FieldPanel('name'),\n        FieldPanel('url'),\n        FieldPanel('issn'),\n        FieldPanel('description', widget=forms.Textarea),\n        FieldPanel('tags'),\n    ]\n\n    search_fields = [\n        index.SearchField('name'),\n        index.SearchField('description'),\n        index.SearchField('issn'),\n        index.RelatedFields('tags', [\n            index.SearchField('name'),\n        ]),\n    ]\n\n\nclass JournalSnippetPlacement(Orderable, models.Model):\n    page = ParentalKey('home.JournalIndexPage', related_name='journal_placements')\n    journal = models.ForeignKey(Journal, related_name='+')\n\n    class Meta:\n        verbose_name = 'journal placement'\n        verbose_name_plural = 'journal placements'\n\n\nclass JournalIndexPage(NavigationMixin, Page):\n    template = 'home/resources/journals.jinja'\n    description = models.TextField(blank=True)\n\n    content_panels = Page.content_panels + [\n        FieldPanel('description'),\n        InlinePanel('journal_placements', label='Journals'),\n    ]\n\n\n@register_snippet\nclass FaqEntry(index.Indexed, models.Model):\n    FAQ_CATEGORIES = Choices(\n        ('abm', _('Agent-based Modeling Questions')),\n        ('general', _('General CoMSES Net Questions')),\n        ('model-library', _('Computational Model Library Questions')),\n    )\n    category = models.CharField(max_length=32, choices=FAQ_CATEGORIES, default=FAQ_CATEGORIES.general)\n    question = models.CharField(max_length=128, help_text=_(\"Short question\"))\n    answer = models.TextField(help_text=_(\"Markdown formatted answer\"))\n    date_created = models.DateTimeField(auto_now_add=True)\n    last_modified = models.DateTimeField(auto_now=True)\n    submitter = models.ForeignKey(User, blank=True, null=True)\n\n    def
__str__(self):\n return \"[{0}] {1} {2}\".format(self.category, self.question, shorten(self.answer, 140))\n\n\nclass FaqEntryPlacement(Orderable, models.Model):\n page = ParentalKey('home.FaqPage', related_name='faq_entry_placements')\n faq_entry = models.ForeignKey(FaqEntry, related_name='+')\n\n class Meta:\n verbose_name = 'faq placement'\n\n\nclass FaqPage(Page, NavigationMixin):\n template = 'home/about/faq.jinja'\n description = models.CharField(max_length=1000)\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n # FIXME: add pagination\n context['faq_entries'] = FaqEntry.objects.all()\n context['faq_categories'] = FaqEntry.FAQ_CATEGORIES\n return context\n\n content_panels = Page.content_panels + [\n FieldPanel('description'),\n InlinePanel('faq_entry_placements', label='FAQ Entries')\n ]\n\n search_fields = Page.search_fields + [\n index.RelatedFields('faq_entry_placements', [\n index.SearchField('faq_entry')\n ])\n ]\n\n\nclass PeopleEntryPlacement(Orderable, models.Model):\n CATEGORIES = Choices(\n (1, 'directorate', _('Directorate')),\n (2, 'board', _('Executive Board')),\n (3, 'digest', _('CoMSES Digest Editors')),\n (4, 'staff', _('Staff')),\n (5, 'alumni', _('Executive Board Alumni')),\n )\n page = ParentalKey('home.PeoplePage', related_name='people_entry_placements')\n member_profile = models.ForeignKey('core.MemberProfile', related_name='+')\n category = models.PositiveIntegerField(choices=CATEGORIES, default=CATEGORIES.board)\n\n def __str__(self):\n return \"{0}: {1} {2}\".format(self.sort_order, self.member_profile, self.category)\n\n class Meta:\n verbose_name = 'people entry placement'\n\n\nclass PeoplePage(Page, NavigationMixin):\n template = 'home/about/people.jinja'\n heading = models.CharField(max_length=64)\n description = models.CharField(max_length=1000, blank=True)\n\n def add_users(self, category, usernames, offset):\n for idx, username in enumerate(usernames):\n # manually iterate and get MemberProfile to enforce original ordering\n profile = MemberProfile.objects.get(user__username=username)\n self.people_entry_placements.add(\n PeopleEntryPlacement(sort_order=offset + idx,\n member_profile=profile,\n category=category)\n )\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context['people_categories'] = PeopleEntryPlacement.CATEGORIES\n return context\n\n content_panels = Page.content_panels + [\n FieldPanel('heading'),\n FieldPanel('description'),\n InlinePanel('people_entry_placements', label='People Entries')\n ]\n\n\nclass NewsIndexPage(Page):\n def get_context(self, request):\n context = super(NewsIndexPage, self).get_context(request)\n context['news_entries'] = NewsPage.objects.child_of(self).live()\n return context\n\n\nclass NewsPage(Page):\n body = RichTextField()\n date = models.DateField(\"Post date\")\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('body'),\n index.FilterField('date')\n ]\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('date'),\n FieldPanel('body', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n ]\n\n promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n ]\n\n # Parent page / subpage type rules\n parent_page_types 
= ['home.NewsIndexPage']\n subpage_types = []\n\n\nclass NewsPageRelatedLink(Orderable):\n page = ParentalKey(NewsPage, related_name='related_links')\n name = models.CharField(max_length=255)\n url = models.URLField()\n\n panels = [\n FieldPanel('name'),\n FieldPanel('url'),\n ]\n", "sub_path": "django/home/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 23503, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 49, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 63, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 64, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 86, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.PageChooserPanel", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 102, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 102, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 105, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.CharField", 
"line_number": 108, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 110, "usage_type": "name"}, {"api_name": "wagtail.wagtailimages.edit_handlers.ImageChooserPanel", "line_number": 112, "usage_type": "call"}, {"api_name": "wagtail.wagtailimages.edit_handlers.ImageChooserPanel", "line_number": 113, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 114, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 115, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 116, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.MultiFieldPanel", "line_number": 117, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 132, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 133, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 142, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 143, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 151, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 151, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 152, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 152, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 157, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 157, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 164, "usage_type": "call"}, {"api_name": "django.conf.settings.DISCOURSE_BASE_URL", "line_number": 164, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 164, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 168, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 168, "usage_type": "name"}, {"api_name": "django.conf.settings.DISCOURSE_BASE_URL", "line_number": 177, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 177, "usage_type": "name"}, {"api_name": "core.models.Job.objects.filter", "line_number": 199, "usage_type": "call"}, {"api_name": "core.models.Job.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "core.models.Job", "line_number": 199, "usage_type": "name"}, {"api_name": "core.models.Event.objects.filter", "line_number": 202, "usage_type": "call"}, {"api_name": "core.models.Event.objects", "line_number": 202, "usage_type": "attribute"}, {"api_name": "core.models.Event", "line_number": 202, "usage_type": "name"}, {"api_name": "library.models.Codebase.objects.filter", "line_number": 204, "usage_type": "call"}, 
{"api_name": "library.models.Codebase.objects", "line_number": 204, "usage_type": "attribute"}, {"api_name": "library.models.Codebase", "line_number": 204, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 209, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 209, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 212, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 212, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 212, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 213, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 213, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 221, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 225, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 225, "usage_type": "name"}, {"api_name": "core.models.Job.objects.order_by", "line_number": 229, "usage_type": "call"}, {"api_name": "core.models.Job.objects", "line_number": 229, "usage_type": "attribute"}, {"api_name": "core.models.Job", "line_number": 229, "usage_type": "name"}, {"api_name": "core.models.Event.objects.upcoming", "line_number": 232, "usage_type": "call"}, {"api_name": "core.models.Event.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "core.models.Event", "line_number": 232, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 242, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 242, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 243, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 243, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 243, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 244, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 245, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 245, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 249, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 249, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 249, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 250, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 251, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 251, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 254, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 254, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 256, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 256, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 257, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 257, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 258, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 258, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 268, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 268, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 268, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 269, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 269, "usage_type": "argument"}, {"api_name": "django.db.models.CharField", "line_number": 270, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 270, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 271, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 271, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 274, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 274, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 274, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 275, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 275, "usage_type": "argument"}, {"api_name": "django.db.models.CharField", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 276, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 277, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 320, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 321, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 321, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 322, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 322, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 322, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 323, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 323, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 327, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 327, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 327, "usage_type": "name"}, {"api_name": "core.utils.get_canonical_image", "line_number": 328, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 339, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 339, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 341, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 342, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 343, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 343, "usage_type": "attribute"}, {"api_name": 
"django.forms", "line_number": 343, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 344, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 344, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 345, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 345, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 348, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 348, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 349, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 349, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 353, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 354, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 354, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 355, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 355, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 355, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 355, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 356, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 356, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.fields.StreamField", "line_number": 358, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks.CharBlock", "line_number": 359, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks", "line_number": 359, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.blocks.RichTextBlock", "line_number": 360, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks", "line_number": 360, "usage_type": "name"}, {"api_name": "wagtail.wagtailimages.blocks.ImageChooserBlock", "line_number": 361, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks.URLBlock", "line_number": 362, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks", "line_number": 362, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 365, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 365, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 366, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 367, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.StreamFieldPanel", "line_number": 368, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 372, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 373, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 373, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 374, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 374, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 375, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 375, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 375, "usage_type": "attribute"}, 
{"api_name": "django.utils.timezone", "line_number": 375, "usage_type": "name"}, {"api_name": "core.fields.MarkdownField", "line_number": 376, "usage_type": "call"}, {"api_name": "core.fields.MarkdownField", "line_number": 377, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 378, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 378, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 380, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 382, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 382, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 383, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 384, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 385, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 386, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 389, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 389, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 390, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 390, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 391, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 391, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 392, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 392, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 396, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 398, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 398, "usage_type": "name"}, {"api_name": "home.forms.ContactForm", "line_number": 402, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 405, "usage_type": "call"}, {"api_name": "home.forms.ContactForm", "line_number": 407, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 409, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 414, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 414, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 415, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 419, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 419, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 419, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 420, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 421, "usage_type": "call"}, {"api_name": "core.models.Platform", "line_number": 421, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 421, "usage_type": "name"}, {"api_name": "wagtail.wagtailsnippets.edit_handlers.SnippetChooserPanel", "line_number": 428, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", 
"line_number": 435, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 437, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 437, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 439, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 439, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 440, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 441, "usage_type": "call"}, {"api_name": "taggit.models.TaggedItemBase", "line_number": 455, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 456, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index.Indexed", "line_number": 460, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 460, "usage_type": "name"}, {"api_name": "modelcluster.models.ClusterableModel", "line_number": 460, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 461, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 461, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 462, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 462, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 463, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 464, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 464, "usage_type": "name"}, {"api_name": "taggit.managers.TaggableManager", "line_number": 465, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 468, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 469, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 470, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 471, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 471, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 471, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 472, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 476, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 476, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 477, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 477, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 478, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 478, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.RelatedFields", "line_number": 479, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 479, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 480, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 480, "usage_type": "name"}, {"api_name": 
"wagtail.wagtailsnippets.models.register_snippet", "line_number": 459, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 485, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 485, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 485, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 486, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 487, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 487, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 494, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 496, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 496, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 498, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 498, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 499, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 500, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index.Indexed", "line_number": 505, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 505, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 505, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 505, "usage_type": "name"}, {"api_name": "model_utils.Choices", "line_number": 506, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 507, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 508, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 509, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 511, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 511, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 512, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 512, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 512, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 513, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 513, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 513, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 514, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 514, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 515, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 515, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 516, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 516, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 516, "usage_type": "name"}, {"api_name": "textwrap.shorten", "line_number": 519, "usage_type": "call"}, {"api_name": "wagtail.wagtailsnippets.models.register_snippet", "line_number": 504, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 522, "usage_type": "name"}, 
{"api_name": "django.db.models.Model", "line_number": 522, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 522, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 523, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 524, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 524, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 530, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 532, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 532, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 541, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 541, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 542, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 543, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 546, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 546, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.RelatedFields", "line_number": 547, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 547, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 548, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 548, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 553, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 553, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 553, "usage_type": "name"}, {"api_name": "model_utils.Choices", "line_number": 554, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 555, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 556, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 557, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 558, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 559, "usage_type": "call"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 561, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 562, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 562, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 563, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 563, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 572, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 574, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 574, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 575, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 575, "usage_type": "name"}, {"api_name": "core.models.MemberProfile.objects.get", "line_number": 580, "usage_type": "call"}, {"api_name": "core.models.MemberProfile.objects", "line_number": 580, "usage_type": "attribute"}, {"api_name": 
"core.models.MemberProfile", "line_number": 580, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 592, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 592, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 593, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 594, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 595, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 599, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 606, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.fields.RichTextField", "line_number": 607, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 608, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 608, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 609, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 609, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 613, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 613, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 617, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 617, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 618, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 618, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.FilterField", "line_number": 619, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 619, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 623, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 623, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 624, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 625, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 626, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.MultiFieldPanel", "line_number": 630, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.promote_panels", "line_number": 630, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 630, "usage_type": "name"}, {"api_name": "wagtail.wagtailimages.edit_handlers.ImageChooserPanel", "line_number": 631, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 639, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 640, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 641, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 641, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 642, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 642, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 645, "usage_type": "call"}, {"api_name": 
"wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 646, "usage_type": "call"}]} +{"seq_id": "560631039", "text": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nr\"\"\"\nBasic training script for PyTorch\n\"\"\"\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom openpose.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom openpose.config import cfg\nfrom openpose.data import make_data_loader\nfrom openpose.solver import make_lr_scheduler\nfrom openpose.solver import make_optimizer\nfrom openpose.engine.inference import inference\nfrom openpose.engine.trainer import do_train_test\nfrom openpose.model.detector.densepose_model import DensePoseModel\nfrom openpose.utils.checkpoint import DetectronCheckpointer\nfrom openpose.utils.collect_env import collect_env_info\nfrom openpose.utils.comm import synchronize, get_rank\nfrom openpose.utils.imports import import_file\nfrom openpose.utils.logger import setup_logger\nfrom openpose.utils.miscellaneous import mkdir\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as plt_patches\nimport numpy as np\nfig,ax = plt.subplots(1)\nfigsub,axsub = plt.subplots(1)\n\ndef load_checkpoint(cfg, local_rank, distributed):\n model = DensePoseModel(cfg)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_lr_scheduler(cfg, optimizer)\n\n if distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[local_rank], output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n )\n\n arguments = {}\n arguments[\"iteration\"] = 0\n\n output_dir = cfg.OUTPUT_DIR\n\n save_to_disk = get_rank() == 0\n checkpointer = DetectronCheckpointer(\n cfg, model, optimizer, scheduler, output_dir, save_to_disk\n )\n extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n arguments.update(extra_checkpoint_data)\n data_loader = make_data_loader(\n cfg,\n is_train=False,\n is_distributed=distributed,\n start_iter=arguments[\"iteration\"],\n )\n return model, data_loader\n\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Training\")\n parser.add_argument(\n \"--config-file\",\n default=\"../../config/densepose_r50_fpn_e2e.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--skip-test\",\n dest=\"skip_test\",\n help=\"Do not test the final model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = num_gpus > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n mkdir(output_dir)\n\n logger = setup_logger(\"openpose\", output_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Collecting env info (might take some time)\")\n 
logger.info(\"\\n\" + collect_env_info())\n\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n model,data_loader = load_checkpoint(cfg, args.local_rank, args.distributed)\n model.eval()\n for iteration, (images,_, _) in enumerate(data_loader[0]):\n images = images.to(cfg.MODEL.DEVICE)\n ax.clear()\n axsub.clear()\n pred = model(images)[0]\n img0 = images.tensors[0]\n img0 = img0.detach().cpu().numpy().transpose(1,2,0)\n #img0[...] += [102.9801,115.9465,122.7117]\n img0[:,:,0] += 102.9801\n img0[:,:,1] += 115.9465\n img0[:,:,2] += 122.7117\n img0 = img0.astype(np.uint8)\n bbox = pred.bbox\n uvs = pred.get_field('uv_output')\n ax.imshow(img0[:,:,::-1])\n\n for box,uv in zip(bbox[:2],uvs[:2]):\n rect = plt_patches.Rectangle((box[0],box[1]),box[2]-box[0],box[3]-box[1],linewidth=1,edgecolor='r',facecolor='none') \n ax.add_patch(rect)\n res = uv[0]\n for ch in range(1,15):\n res = np.hstack((res,uv[ch]))\n axsub.imshow(res)\n plt.draw()\n plt.pause(0.01)\n input()\n\n # if not args.skip_test:\n # run_test(cfg, model, args.distributed)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "openpose/tests/test_resnet50_test.py", "file_name": "test_resnet50_test.py", "file_ext": "py", "file_size_in_byte": 5052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "openpose.model.detector.densepose_model.DensePoseModel", "line_number": 34, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 34, "usage_type": "argument"}, {"api_name": "torch.device", "line_number": 35, "usage_type": "call"}, {"api_name": "openpose.config.cfg.MODEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 35, "usage_type": "name"}, {"api_name": "openpose.solver.make_optimizer", "line_number": 38, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 38, "usage_type": "argument"}, {"api_name": "openpose.solver.make_lr_scheduler", "line_number": 39, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 39, "usage_type": "argument"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg.OUTPUT_DIR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 51, "usage_type": "name"}, {"api_name": "openpose.utils.comm.get_rank", "line_number": 53, "usage_type": "call"}, {"api_name": "openpose.utils.checkpoint.DetectronCheckpointer", "line_number": 54, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 55, "usage_type": "argument"}, {"api_name": "openpose.config.cfg.MODEL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 57, "usage_type": "name"}, {"api_name": "openpose.data.make_data_loader", "line_number": 59, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 60, 
"usage_type": "argument"}, {"api_name": "argparse.ArgumentParser", "line_number": 72, "usage_type": "call"}, {"api_name": "argparse.REMAINDER", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.distributed.init_process_group", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 101, "usage_type": "attribute"}, {"api_name": "openpose.utils.comm.synchronize", "line_number": 104, "usage_type": "call"}, {"api_name": "openpose.config.cfg.merge_from_file", "line_number": 106, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 106, "usage_type": "name"}, {"api_name": "openpose.config.cfg.merge_from_list", "line_number": 107, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 107, "usage_type": "name"}, {"api_name": "openpose.config.cfg.freeze", "line_number": 108, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 108, "usage_type": "name"}, {"api_name": "openpose.config.cfg.OUTPUT_DIR", "line_number": 110, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 110, "usage_type": "name"}, {"api_name": "openpose.utils.miscellaneous.mkdir", "line_number": 112, "usage_type": "call"}, {"api_name": "openpose.utils.logger.setup_logger", "line_number": 114, "usage_type": "call"}, {"api_name": "openpose.utils.comm.get_rank", "line_number": 114, "usage_type": "call"}, {"api_name": "openpose.utils.collect_env.collect_env_info", "line_number": 119, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 125, "usage_type": "argument"}, {"api_name": "openpose.config.cfg", "line_number": 127, "usage_type": "argument"}, {"api_name": "openpose.config.cfg.MODEL", "line_number": 130, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 140, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 146, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}]} +{"seq_id": "425976576", "text": "from sklearn.preprocessing import LabelBinarizer\n#from sklearn.metrics import classification_report\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nimport tensorflow as tf\nif tf.test.gpu_device_name():\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\nelse:\n print(\"Please install GPU version of TF\")\n\n#import sys\n#sys.path.append(\"/home/hrushikesh/dl4cv/callbacks\")\n\nfrom minivggnet import MiniVGGNet\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.optimizers import SGD\nimport argparse\n\nap=argparse.ArgumentParser()\nap.add_argument(\"-w\",\"--weights\",required=True,\n help=\"path to the best weights file\")\n#output 
directory to store figure and serialized JSON training history\nargs=vars(ap.parse_args())\n\n# load the training and testing data, then scale it into the\n# range [0, 1]\nprint(\"[INFO] loading CIFAR-10 data...\")\n((trainX, trainY), (testX, testY)) = cifar10.load_data()\ntrainX = trainX.astype(\"float\") / 255.0\ntestX = testX.astype(\"float\") / 255.0\n# convert the labels from integers to vectors\n\nlb = LabelBinarizer()\ntrainY = lb.fit_transform(trainY)\ntestY = lb.transform(testY)\n# initialize the optimizer and model\nprint(\"[INFO] compiling model...\")\nopt = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)\nmodel = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt,\n metrics=[\"accuracy\"])\n\ncheckpoint= ModelCheckpoint(args[\"weights\"], monitor=\"val_loss\", mode=\"min\",\n save_best_only=True, verbose=1)\ncallbacks=[checkpoint]\n\n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY),\nbatch_size=64, epochs=40, callbacks=callbacks, verbose=2)\n\n", "sub_path": "conv/cifar10_checkpoint_best.py", "file_name": "cifar10_checkpoint_best.py", "file_ext": "py", "file_size_in_byte": 1861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 10, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.cifar10.load_data", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.cifar10", "line_number": 32, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 42, "usage_type": "call"}, {"api_name": "minivggnet.MiniVGGNet.build", "line_number": 43, "usage_type": "call"}, {"api_name": "minivggnet.MiniVGGNet", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "526887720", "text": "import os\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Flatten, Dense\nfrom keras.models import load_model\nimport numpy as np\n\n\ntrain_count = 0\n\ntrain_count += len(os.listdir('trainset/'))\n\n\nimg_width, img_height = 100, 100\ntrain_data_dir = \"trainset/\"\nbatch_size = 32\nepochs = 5\nnb_classes = 10\ninput_shape = (img_width, img_height, 1)\n\nnb_train_samples = train_count\n\ndef nnmodel():\n\n model = Sequential()\n model.add(Conv2D(128, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n 
model.add(Flatten())\n model.add(Dense(1000))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes, activation=\"softmax\"))\n\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n return model\n\n\ndef train():\n\n model = nnmodel()\n\n model.summary()\n\n # this is the augmentation configuration we will use for training\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n color_mode='grayscale',\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=True)\n\n\n from PIL import ImageFile\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n\n model.fit_generator(\n train_generator,\n #steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs)\n #callbacks=[monitor,checkpointer])\n\n #model.load_weights('best_weights.hdf5') # load weights from best model\n model.save('model.h5')\n\n\n\ndef test():\n\n model = load_model('model.h5')\n # dimensions of our images.\n img_width, img_height = 100, 100\n\n test_data_dir = 'testset'\n\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n test_generator = test_datagen.flow_from_directory(\n test_data_dir,\n target_size=(img_width, img_height),\n color_mode='grayscale',\n batch_size=batch_size,\n class_mode = 'categorical',\n shuffle=False)\n\n filenames = test_generator.filenames\n\n print (filenames)\n predictions = model.predict_generator(test_generator, (len(filenames) + batch_size - 1) // batch_size)\n classes = []\n print (predictions)\n for prediction in predictions :\n #print sum(prediction)\n classes.append((list(prediction).index(max(prediction))))\n print (classes)\n\n\n#train()\n\ntest()\n\n\n\n\n\n\n", "sub_path": "signLanguagePredict.py", "file_name": "signLanguagePredict.py", "file_ext": "py", "file_size_in_byte": 2840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 73, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 88, "usage_type": 
"call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "301025180", "text": "#!/usr/bin/python3\n\"\"\"View fo User objects\"\"\"\n\nfrom models import storage\nfrom models.city import City\nfrom models.review import Review\nfrom models.place import Place\nfrom models.user import User\nfrom flask import Flask, jsonify, request, abort\nfrom api.v1.views import app_views\n\n\n@app_views.route('/places//reviews', methods=['GET'],\n strict_slashes=False)\ndef all_reviews(place_id):\n \"\"\"Retrieve all reviews based on place_id\"\"\"\n obj_places = storage.get(Place, place_id)\n list_reviews = []\n if obj_places:\n for review in obj_places.reviews:\n list_reviews.append(review.to_dict())\n return jsonify(list_reviews)\n else:\n abort(404)\n\n\n@app_views.route('/reviews/', methods=['GET'],\n strict_slashes=False)\ndef one_review(review_id):\n \"\"\"Retrieve a review based on review_id\"\"\"\n obj_review = storage.get(Review, review_id)\n if obj_review:\n return jsonify(obj_review.to_dict())\n abort(404)\n\n\n@app_views.route('/reviews/', methods=['DELETE'],\n strict_slashes=False)\ndef del_review(review_id):\n \"\"\"Delete a review based on review_id\"\"\"\n obj_review = storage.get(Review, review_id)\n if obj_review:\n obj_review.delete()\n storage.save()\n return({})\n abort(404)\n\n\n@app_views.route('/places//reviews', methods=['POST'],\n strict_slashes=False)\ndef create_review(place_id):\n \"\"\"Post review based on json\"\"\"\n obj_place = storage.get(Place, place_id)\n if obj_place is None:\n abort(404)\n obj_dict = request.get_json()\n if obj_dict is None:\n abort(400, 'Not a JSON')\n # transform the HTTP body request to a dictionary\n if 'user_id' not in obj_dict:\n abort(400, 'Missing user_id')\n user_id = obj_dict.get('user_id', None)\n obj_user = storage.get(User, user_id)\n if not obj_user:\n abort(404)\n if 'text' not in obj_dict:\n abort(400, 'Missing text')\n obj_review = Review(place_id=place_id, **obj_dict)\n obj_review.save()\n return jsonify(obj_review.to_dict()), 201\n\n\n@app_views.route('reviews/', methods=['PUT'],\n strict_slashes=False)\ndef update_review(review_id):\n \"\"\"Updates review based on user_id\"\"\"\n obj_review = storage.get(Review, review_id)\n if obj_review is None:\n abort(404)\n # transform the HTTP body request to a dictionary\n to_update = request.get_json()\n if to_update is None:\n abort(400, 'Not a JSON')\n\n # These keys cannot be update\n ignore_keys = ['id', 'user_id', 'place_id',\n 'created_at', 'updated_at']\n\n # check if key in dictionary is not allowed to be updated\n for key_ignore in ignore_keys:\n if key_ignore in to_update.keys():\n del to_update[key_ignore]\n if obj_review:\n for key, value in to_update.items():\n setattr(obj_review, key, value)\n obj_review.save()\n return jsonify(obj_review.to_dict()), 200\n else:\n abort(404)\n", "sub_path": "api/v1/views/places_reviews.py", "file_name": "places_reviews.py", "file_ext": "py", "file_size_in_byte": 3060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "models.storage.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.place.Place", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 24, "usage_type": "call"}, {"api_name": 
"api.v1.views.app_views.route", "line_number": 13, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 13, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 31, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 31, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 34, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 27, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 27, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 41, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 41, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 41, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 44, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 46, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 37, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 37, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.place.Place", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 61, "usage_type": "call"}, {"api_name": "models.storage.get", "line_number": 63, "usage_type": "call"}, {"api_name": "models.user.User", "line_number": 63, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 67, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 49, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 49, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 77, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 77, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 99, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 73, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "595554525", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright 2018 NAVER Corp.\n\nPermission is 
hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport nsml\nfrom nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML\nfrom dataset import KinQueryDataset, preprocess\n\n\n# DONOTCHANGE: They are reserved for nsml\n# This is for nsml leaderboard\ndef bind_model(sess, config):\n # Function that saves the trained model.\n def save(dir_name, *args):\n # directory\n os.makedirs(dir_name, exist_ok=True)\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(dir_name, 'model'))\n\n # Function that loads the saved model.\n def load(dir_name, *args):\n saver = tf.train.Saver()\n # find checkpoint\n ckpt = tf.train.get_checkpoint_state(dir_name)\n if ckpt and ckpt.model_checkpoint_path:\n checkpoint = os.path.basename(ckpt.model_checkpoint_path)\n saver.restore(sess, os.path.join(dir_name, checkpoint))\n else:\n raise NotImplementedError('No checkpoint!')\n print('Model loaded')\n\n def infer(raw_data, **kwargs):\n \"\"\"\n :param raw_data: the raw input (a string in this case)\n :param kwargs:\n :return:\n \"\"\"\n # Call the preprocess function written in dataset.py to convert the string into a vector\n preprocessed_data = preprocess(raw_data, config.strmaxlen)\n # Feed the input into the saved model and receive the prediction result\n pred = sess.run(prob, feed_dict={x: preprocessed_data})\n clipped = np.array(pred > config.threshold, dtype=np.int)\n # DONOTCHANGE: They are reserved for nsml\n # The result must be returned in the form [(probability, 0 or 1)] to be posted to the leaderboard. 
The probability value does not affect the leaderboard result\n return list(zip(pred.flatten(), clipped.flatten()))\n\n # DONOTCHANGE: They are reserved for nsml\n # Function that lets nsml access the functions designated above.\n nsml.bind(save=save, load=load, infer=infer)\n\n\ndef _batch_loader(iterable, n=1):\n length = len(iterable)\n for n_idx in range(0, length, n):\n yield iterable[n_idx:min(n_idx + n, length)]\n\n\ndef conv2d(x, f=64, k=3, s=1, pad='SAME', name=\"conv2d\"):\n return tf.layers.conv2d(x,\n filters=f, kernel_size=k, strides=s,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),\n bias_initializer=tf.zeros_initializer(),\n padding=pad,\n name=name)\n\n\ndef dense(x, units, name='fc'):\n return tf.layers.dense(x, units,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),\n bias_initializer=tf.zeros_initializer(),\n name=name)\n\n\ndef batch_norm(x, momentum=0.9, eps=1e-5, is_train=True, name=\"bn\"):\n return tf.layers.batch_normalization(inputs=x,\n momentum=momentum,\n epsilon=eps,\n scale=True,\n trainable=is_train,\n name=name)\n\n\ndef instance_norm(x, name=\"ibn\"):\n epsilon = 1e-9\n\n mean, var = tf.nn.moments(x, [1, 2], keepdims=True, name=name)\n\n return tf.div(tf.subtract(x, mean), tf.sqrt(tf.add(var, epsilon)))\n\n\ndef BiRNN(x, dropout=.5, embed_size=32, seq_length=200, h_units=512):\n n_hidden = h_units\n n_layers = 3\n # not yet implemented...\n return None\n\n\ndef contrastive_loss(y, d, batch_size):\n tmp = y * tf.square(d)\n tmp2 = (1. - y) * tf.square(tf.maximum((1 - d), 0))\n return tf.reduce_sum(tmp + tmp2) / batch_size / 2\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n\n # DONOTCHANGE: They are reserved for nsml\n args.add_argument('--mode', type=str, default='train')\n args.add_argument('--pause', type=int, default=0)\n args.add_argument('--iteration', type=str, default='0')\n\n # User options\n args.add_argument('--output', type=int, default=1)\n args.add_argument('--epochs', type=int, default=151)\n args.add_argument('--batch', type=int, default=256)\n args.add_argument('--strmaxlen', type=int, default=400)\n args.add_argument('--embedding', type=int, default=32)\n args.add_argument('--threshold', type=float, default=0.5)\n args.add_argument('--bn', type=bool, default=False)\n args.add_argument('--lr', type=float, default=1e-4)\n config = args.parse_args()\n\n if not HAS_DATASET and not IS_ON_NSML: # It is not running on nsml\n DATASET_PATH = '../sample_data/kin/'\n\n # model's specification (hyper-parameters)\n input_size = config.embedding * config.strmaxlen\n output_size = 1\n fc_unit = 1024\n conv_filters = 64\n learning_rate = config.lr\n character_size = 251\n\n x = tf.placeholder(tf.int32, [None, config.strmaxlen])\n y_ = tf.placeholder(tf.float32, [None, output_size])\n\n # embeddings\n char_embedding = tf.get_variable('char_embedding', [character_size, config.embedding])\n embedded = tf.nn.embedding_lookup(char_embedding, x)\n embedded = tf.reshape(embedded, (-1, 40, 40, 8)) # to 4-D\n\n print(\"[+] embedded size : \", embedded.get_shape().as_list()) # (batch_size, 40, 40, 8)\n\n logits = dense(embedded, output_size, name='fc-2')\n prob = tf.nn.sigmoid(logits)\n\n # logistic loss\n bce_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y_))\n\n # Adam Optimizer\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(bce_loss)\n # RMSProp Optimizer\n # train_step = 
tf.train.RMSPropOptimizer(learning_rate, momentum=0.9).minimize(bce_loss)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n # not yet implemented...\n\n # DONOTCHANGE: Reserved for nsml\n bind_model(sess=sess, config=config)\n\n # DONOTCHANGE: Reserved for nsml\n if config.pause:\n nsml.paused(scope=locals())\n\n if config.mode == 'train':\n dataset = KinQueryDataset(DATASET_PATH, config.strmaxlen)\n\n dataset_len = len(dataset)\n one_batch_size = dataset_len // config.batch\n if dataset_len % config.batch != 0:\n one_batch_size += 1\n\n for epoch in range(config.epochs):\n avg_loss = 0.\n for i, (data, labels) in enumerate(_batch_loader(dataset, config.batch)):\n _, loss = sess.run([train_step, bce_loss],\n feed_dict={\n x: data,\n y_: labels\n })\n\n print('Batch : ', i + 1, '/', one_batch_size, ', BCE in this minibatch: ', float(loss))\n avg_loss += float(loss)\n\n print('epoch:', epoch, ' train_loss:', float(avg_loss / one_batch_size))\n\n min_loss = avg_loss\n\n nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,\n train__loss=float(avg_loss / one_batch_size), step=epoch)\n\n # DONOTCHANGE (You can decide how often you want to save the model)\n nsml.save(epoch)\n\n # [(0.3, 0), (0.7, 1), ... ]\n elif config.mode == 'test_local':\n with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:\n queries = f.readlines()\n res = []\n for batch in _batch_loader(queries, config.batch):\n temp_res = nsml.infer(batch)\n res += temp_res\n\n print(res)\n", "sub_path": "kin/_siamese_not_yet/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dataset.preprocess", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 67, "usage_type": "attribute"}, {"api_name": "nsml.bind", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.variance_scaling_initializer", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 87, "usage_type": "call"}, {"api_name": 
"tensorflow.contrib", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.variance_scaling_initializer", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.moments", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tensorflow.div", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 128, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 132, "usage_type": "call"}, {"api_name": "nsml.HAS_DATASET", "line_number": 150, "usage_type": "name"}, {"api_name": "nsml.IS_ON_NSML", "line_number": 150, "usage_type": "name"}, {"api_name": "nsml.DATASET_PATH", "line_number": 151, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 161, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 178, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 182, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 183, "usage_type": "call"}, {"api_name": "nsml.paused", "line_number": 191, "usage_type": "call"}, 
{"api_name": "dataset.KinQueryDataset", "line_number": 194, "usage_type": "call"}, {"api_name": "nsml.DATASET_PATH", "line_number": 194, "usage_type": "argument"}, {"api_name": "nsml.report", "line_number": 217, "usage_type": "call"}, {"api_name": "nsml.save", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "nsml.DATASET_PATH", "line_number": 225, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "nsml.infer", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "170856185", "text": "import sys\nimport os\nimport json\nfrom PyQt5.QtWidgets import (QMainWindow, QPushButton, QApplication, QWidget, QGridLayout, QAction, QVBoxLayout,\n qApp, QFileDialog, QHBoxLayout, QLabel)\nfrom table import Table\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.mappingtables = ''\n # Окно\n self.setGeometry(500, 300, 500, 100)\n self.setWindowTitle(\"JournalChanger\")\n self.mainwidget = QWidget(self)\n self.setCentralWidget(self.mainwidget)\n self.layout_grid = QGridLayout()\n self.mainwidget.setLayout(self.layout_grid)\n # Exit\n exitAction = QAction('&Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(qApp.quit)\n\n # Mapping Settings Actions\n self.newmappingAction = QAction('New mapping')\n self.newmappingAction.setShortcut('Ctrl+N')\n self.newmappingAction.triggered.connect(self.showmapping)\n\n self.openmappingAction = QAction('Open mapping')\n self.openmappingAction.setShortcut('Ctrl+O')\n self.openmappingAction.triggered.connect(self.showmapping)\n\n self.statusBar()\n\n self.menubar = self.menuBar()\n self.fileMenu = self.menubar.addMenu('&File')\n self.settings = self.fileMenu.addMenu('&Mappings')\n self.settings.addAction(self.newmappingAction)\n self.settings.addAction(self.openmappingAction)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(exitAction)\n\n # buttons\n self.but1 = QPushButton(\"Show Mappings\")\n self.layout_grid.addWidget(self.but1)\n self.but1.clicked.connect(self.showmapping)\n\n def showmapping(self):\n if self.sender().text() == \"New mapping\":\n filename = 'New'\n elif self.sender().text() == \"Open mapping\" or self.sender().text() == \"Show Mappings\":\n filename = QFileDialog.getOpenFileName(self, 'Open Mapping File', os.getcwd())\n else:\n print(\"test Button activated\")\n self.mappingtables = MappingWindow(filename)\n self.mappingtables.show()\n\n\nclass MappingWindow(QMainWindow):\n def __init__(self, filename):\n super().__init__()\n self.filename = filename\n # Window\n self.setGeometry(500, 300, 500, 100)\n self.setWindowTitle(\"Mappings\")\n self.mappingwidget = QWidget(self)\n self.setCentralWidget(self.mappingwidget)\n self.vbox_grid = QVBoxLayout()\n self.hbox_table_grid = QHBoxLayout()\n self.hbox_label_grid = QHBoxLayout()\n self.mappingwidget.setLayout(self.vbox_grid)\n\n # Toolbar actions\n # Exit\n exitAction = QAction('&Close', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip(\"Close mappings\")\n # Add row\n addrows = QAction('Add Row', self)\n addrows.triggered.connect(self.addrow)\n addrows.setStatusTip(\"Add row to tables\")\n # Delete row\n delrow = QAction('Del Row', self)\n delrow.triggered.connect(self.delrow)\n delrow.setStatusTip(\"Delete selected row\")\n\n self.statusBar()\n\n self.menu = self.menuBar()\n fileMenu = self.menu.addMenu('&File')\n 
fileMenu.addAction(addrows)\n fileMenu.addAction(exitAction)\n\n self.toolbar = self.addToolBar(\"&Add\")\n self.toolbar.addAction(addrows)\n self.toolbar.addAction(delrow)\n\n # Table for mapping\n self.sourcetable = Table(self.filename, 'Source')\n self.hbox_table_grid.addWidget(self.sourcetable)\n self.hbox_table_grid.addSpacing(40)\n self.targettable = Table(self.filename, 'Target')\n self.hbox_table_grid.addWidget(self.targettable)\n\n self.label1 = QLabel(\"Source\")\n self.label2 = QLabel('Target')\n self.hbox_label_grid.addWidget(self.label1)\n self.hbox_label_grid.addSpacing(40)\n self.hbox_label_grid.addWidget(self.label2)\n\n self.vbox_grid.addLayout(self.hbox_label_grid)\n self.vbox_grid.addLayout(self.hbox_table_grid)\n\n self.butsave = QPushButton('Save mapping')\n self.butsave.clicked.connect(self.writemapping)\n\n self.vbox_grid.addWidget(self.butsave)\n\n def addrow(self):\n row = self.sourcetable.rowCount() + 1\n self.sourcetable.setRowCount(row)\n self.targettable.setRowCount(row)\n\n def delrow(self):\n # delete selected row from source and target tables\n rowtodel = self.sourcetable.currentRow()\n self.sourcetable.removeRow(rowtodel)\n self.targettable.removeRow(rowtodel)\n\n def writemapping(self):\n filesave = QFileDialog.getSaveFileName(self, \"Save mapping\", os.getcwd(), \"JSON files(*.json)\")\n print(filesave)\n mappings = {}\n for i in range(0, self.sourcetable.rowCount()):\n mapp = (dict\n (\n Source=dict(\n account=self.sourcetable.item(i, 0).text(),\n ICP=self.sourcetable.item(i, 1).text(),\n MovProd=self.sourcetable.item(i, 2).text(),\n VarLob=self.sourcetable.item(i, 3).text(),\n MktOvr=self.sourcetable.item(i, 4).text(),\n AuditDim=self.sourcetable.item(i, 5).text(),\n RelPartDisc=self.sourcetable.item(i, 6).text(),\n CostCenterDisc=self.sourcetable.item(i, 7).text(),\n CustomType=self.sourcetable.item(i, 8).text()\n ),\n Target=dict(\n account=self.targettable.item(i, 0).text(),\n ICP=self.targettable.item(i, 1).text(),\n MovProd=self.targettable.item(i, 2).text(),\n VarLob=self.targettable.item(i, 3).text(),\n MktOvr=self.targettable.item(i, 4).text(),\n AuditDim=self.targettable.item(i, 5).text(),\n RelPartDisc=self.targettable.item(i, 6).text(),\n CostCenterDisc=self.targettable.item(i, 7).text(),\n CustomType=self.targettable.item(i, 8).text()\n )\n )\n )\n mapp2 = dict.fromkeys([self.sourcetable.item(i, 0).text()], mapp)\n mappings.update(mapp2)\n mappingtowrite = dict.fromkeys(['Mappings'], mappings)\n print(mappingtowrite)\n with open(filesave[0], 'w', encoding='utf-8') as writefile:\n x = json.dumps(mappingtowrite, sort_keys=True, indent=4, ensure_ascii=False)\n writefile.write(x)\n writefile.close()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\n", "sub_path": "GUI.py", "file_name": "GUI.py", "file_ext": "py", "file_size_in_byte": 6815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.qApp.quit", "line_number": 24, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.qApp", "line_number": 24, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QAction", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 54, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 71, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 85, "usage_type": "call"}, {"api_name": "table.Table", "line_number": 101, "usage_type": "call"}, {"api_name": "table.Table", "line_number": 104, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 108, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName", "line_number": 133, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 133, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 133, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 168, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 173, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "435584659", "text": "import tkinter as tk\nimport serial\nimport simplejson\nimport time\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom drawnow import *\nimport matplotlib.pyplot as plt\n\n\ndatos = []\narduinoData = serial.Serial('COM3',9600,timeout=5)\nplt.ion()\ncnt = 0\n\ndef suma():\n suma = int(entrada1.get())\n arduinoData.write(suma)\n time.sleep(1)\n print(suma)\n return var.set(suma)\n\ndef makeFig():\n plt.ylim(0,50)\n plt.title('Medicion de la Temperatura')\n plt.grid(True)\n plt.ylabel('Temperatura')\n plt.plot(datos,'ro-',label='Temperatura')\n plt.legend(loc='upper left')\n fig.canvas.draw()\n\nventana = tk.Tk()\nventana.wm_title(\"Lectura de Temperaturas\")\nvar = tk.StringVar()\n\nfig = plt.figure()\ncanvas = FigureCanvasTkAgg(fig,master=ventana)\ncanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\nel = tk.Label(ventana,text=\"Numero1: \",bg=\"pink\",fg=\"white\")\nel.pack(padx=5,pady=4,ipadx=5,ipady=5,fill=tk.X)\n\nentrada1 = tk.Entry(ventana)\nentrada1.pack(fill=tk.X,padx=5,pady=5,ipadx=5,ipady=5)\n\nbotonSuma = tk.Button(ventana,text=\"Suma\",fg=\"blue\",command=suma)\nbotonSuma.pack(side=tk.TOP)\n\nwhile True:\n while(arduinoData.inWaiting()==0):\n pass\n arduinoString = arduinoData.readline()\n jsonObject = simplejson.loads(arduinoString)\n temp = float(jsonObject[\"t\"])\n y = float(jsonObject[\"y\"])\n time.sleep(0.01)\n 
print(temp,\",\",y)\n datos.append(temp)\n drawnow(makeFig)\n plt.pause(.0001)\nventana.mainloop()", "sub_path": "python-arduino/Tkinter5.py", "file_name": "Tkinter5.py", "file_ext": "py", "file_size_in_byte": 1480, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "serial.Serial", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 31, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 36, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tkinter.BOTH", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 39, "usage_type": "call"}, {"api_name": "tkinter.X", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.X", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 46, "usage_type": "attribute"}, {"api_name": "simplejson.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "551360437", "text": "import os.path as osp\nimport argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.datasets import Planetoid\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv, ChebConv # noqa\nfrom torch_geometric.utils import from_scipy_sparse_matrix\n\nimport pickle\nimport numpy as np\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--use_gdc', action='store_true',\n help='Use GDC preprocessing.')\nargs = parser.parse_args()\n\ndataset = 'ota'\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)\nprint(path)\n# dataset = Planetoid(path, dataset, 
transform=T.NormalizeFeatures())\n# data = dataset[0]\n\nwith open(osp.join(path,'processed_data.p'), 'rb') as fp:\n all_inputs = pickle.load(fp)\n\nif 'circuit_graph' in locals():\n del circuit_graph\n\nfor circuit_name, circuit_data in all_inputs.items():\n df = circuit_data[\"data_matrix\"]\n print(circuit_name)\n node_features = df.values\n node_features = np.delete(node_features, 0, 1)\n node_features = np.array(node_features, dtype=np.float32)\n node_features = node_features[:, 0:16]\n x = torch.Tensor(node_features)\n y = torch.Tensor(circuit_data[\"target\"])\n adj = circuit_data[\"adjacency_matrix\"]\n # print(adj.todense())\n edge_index, edge_weight = from_scipy_sparse_matrix(adj)\n print(edge_index,edge_weight)\n exit()\n if 'circuit_graph' in locals() and 'X' in locals():\n X = np.concatenate((X, node_features), axis=0)\n label = circuit_data[\"target\"].reshape((-1, 1))\n y = np.concatenate((y, label), axis=0)\n igraph = circuit_data[\"adjacency_matrix\"]\n circuit_graph = block_diag((circuit_graph, igraph)).tocsr()\n else:\n X = node_features\n y = circuit_data[\"target\"].reshape((-1, 1))\n circuit_graph = circuit_data[\"adjacency_matrix\"]\n\n\nif args.use_gdc:\n gdc = T.GDC(self_loop_weight=1, normalization_in='sym',\n normalization_out='col',\n diffusion_kwargs=dict(method='ppr', alpha=0.05),\n sparsification_kwargs=dict(method='topk', k=128,\n dim=0), exact=True)\n data = gdc(data)\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(dataset.num_features, 16, cached=True,\n normalize=not args.use_gdc)\n self.conv2 = GCNConv(16, dataset.num_classes, cached=True,\n normalize=not args.use_gdc)\n # self.conv1 = ChebConv(data.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, data.num_features, K=2)\n\n def forward(self):\n x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr\n x = F.relu(self.conv1(x, edge_index, edge_weight))\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index, edge_weight)\n return F.log_softmax(x, dim=1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel, data = Net().to(device), data.to(device)\noptimizer = torch.optim.Adam([\n dict(params=model.conv1.parameters(), weight_decay=5e-4),\n dict(params=model.conv2.parameters(), weight_decay=0)\n], lr=0.01) # Only perform weight-decay on first convolution.\n\n\ndef train():\n model.train()\n optimizer.zero_grad()\n F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()\n optimizer.step()\n\n\n@torch.no_grad()\ndef test():\n model.eval()\n logits, accs = model(), []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n pred = logits[mask].max(1)[1]\n acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()\n accs.append(acc)\n return accs\n\n\nbest_val_acc = test_acc = 0\nfor epoch in range(1, 201):\n train()\n train_acc, val_acc, tmp_test_acc = test()\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n test_acc = tmp_test_acc\n log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n print(log.format(epoch, train_acc, best_val_acc, test_acc))\n", "sub_path": "examples/gcn_gana.py", "file_name": "gcn_gana.py", "file_ext": "py", "file_size_in_byte": 4082, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 21, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "torch_geometric.utils.from_scipy_sparse_matrix", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "torch_geometric.transforms.GDC", "line_number": 59, "usage_type": "call"}, {"api_name": "torch_geometric.transforms", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 70, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "224924702", "text": "\"\"\" Launcher functionality for the Google Compute Engine (GCE)\n\"\"\"\nimport json\nimport logging\nimport os\n\nfrom dcos_launch import onprem, util\nfrom dcos_launch.platforms import gcp\nfrom dcos_test_utils.helpers import Host\nfrom googleapiclient.errors import HttpError\n\nlog = logging.getLogger(__name__)\n\n\ndef get_credentials(env=None) -> tuple:\n path = None\n if env is None:\n env = os.environ.copy()\n if 'GCE_CREDENTIALS' in env:\n json_credentials = env['GCE_CREDENTIALS']\n elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:\n path = env['GOOGLE_APPLICATION_CREDENTIALS']\n json_credentials = util.read_file(path)\n else:\n raise util.LauncherError(\n 'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')\n\n return json_credentials, path\n\n\nclass OnPremLauncher(onprem.AbstractOnpremLauncher):\n # Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS\n def __init__(self, config: dict, 
env=None):\n creds_string, _ = get_credentials(env)\n self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))\n self.config = config\n\n @property\n def deployment(self):\n \"\"\" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the\n corresponding real deployment (active machines) exists and doesn't contain any errors.\n \"\"\"\n try:\n deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],\n self.config['gce_zone'])\n info = deployment.get_info()\n errors = info['operation'].get('error')\n if errors:\n raise util.LauncherError('DeploymentContainsErrors', str(errors))\n return deployment\n except HttpError as e:\n if e.resp.status == 404:\n raise util.LauncherError('DeploymentNotFound',\n \"The deployment you are trying to access doesn't exist\") from e\n raise e\n\n def create(self) -> dict:\n self.key_helper()\n node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']\n + self.config['num_private_agents'])\n gcp.BareClusterDeployment.create(\n self.gcp_wrapper,\n self.config['deployment_name'],\n self.config['gce_zone'],\n node_count,\n self.config['disk_size'],\n self.config['disk_type'],\n self.config['source_image'],\n self.config['machine_type'],\n self.config['image_project'],\n self.config['ssh_user'],\n self.config['ssh_public_key'],\n self.config['disable_updates'],\n self.config['use_preemptible_vms'],\n tags=self.config.get('tags'))\n return self.config\n\n def key_helper(self):\n \"\"\" Generates a public key and a private key and stores them in the config. The public key will be applied to\n all the instances in the deployment later on when wait() is called.\n \"\"\"\n if self.config['key_helper']:\n private_key, public_key = util.generate_rsa_keypair()\n self.config['ssh_private_key'] = private_key.decode()\n self.config['ssh_public_key'] = public_key.decode()\n\n def get_cluster_hosts(self) -> [Host]:\n return list(self.deployment.hosts)[1:]\n\n def get_bootstrap_host(self) -> Host:\n return list(self.deployment.hosts)[0]\n\n def wait(self):\n \"\"\" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once\n the network is deployed, a firewall for the network and an instance template are deployed. 
Finally,\n once the instance template is deployed, an instance group manager and all its instances are deployed.\n \"\"\"\n self.deployment.wait_for_completion()\n\n def delete(self):\n \"\"\" Deletes all the resources associated with the deployment (instance template, network, firewall, instance\n group manager and all its instances.\n \"\"\"\n self.deployment.delete()\n", "sub_path": "dcos_launch/gcp.py", "file_name": "gcp.py", "file_ext": "py", "file_size_in_byte": 4208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "dcos_launch.util.read_file", "line_number": 23, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 23, "usage_type": "name"}, {"api_name": "dcos_launch.util.LauncherError", "line_number": 25, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 25, "usage_type": "name"}, {"api_name": "dcos_launch.onprem.AbstractOnpremLauncher", "line_number": 31, "usage_type": "attribute"}, {"api_name": "dcos_launch.onprem", "line_number": 31, "usage_type": "name"}, {"api_name": "dcos_launch.platforms.gcp.GcpWrapper", "line_number": 35, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp.BareClusterDeployment", "line_number": 44, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp", "line_number": 44, "usage_type": "name"}, {"api_name": "dcos_launch.util.LauncherError", "line_number": 49, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 49, "usage_type": "name"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 51, "usage_type": "name"}, {"api_name": "dcos_launch.util.LauncherError", "line_number": 53, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 53, "usage_type": "name"}, {"api_name": "dcos_launch.platforms.gcp.BareClusterDeployment.create", "line_number": 61, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp.BareClusterDeployment", "line_number": 61, "usage_type": "attribute"}, {"api_name": "dcos_launch.platforms.gcp", "line_number": 61, "usage_type": "name"}, {"api_name": "dcos_launch.util.generate_rsa_keypair", "line_number": 83, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 83, "usage_type": "name"}, {"api_name": "dcos_test_utils.helpers.Host", "line_number": 87, "usage_type": "name"}, {"api_name": "dcos_test_utils.helpers.Host", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "414657027", "text": "import os.path as osp\nimport logging\nimport time\nimport argparse\nimport csv\nfrom collections import OrderedDict\n\nimport options.options as option\nimport utils.util as util\nfrom data.util import bgr2ycbcr\nfrom data import create_dataset, create_dataloader\nfrom models import create_model\n\n\ndef cal_pnsr_ssim(sr_img, gt_img, lr_img, lrgt_img):\n # save images\n suffix = opt['suffix']\n if suffix:\n save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '.png')\n else:\n save_img_path = osp.join(dataset_dir, folder, img_name + '.png')\n util.save_img(sr_img, save_img_path)\n #\n # if suffix:\n # save_img_path = osp.join(dataset_dir, folder, img_name + suffix + 
'_GT.png')\n # else:\n # save_img_path = osp.join(dataset_dir, folder, img_name + '_GT.png')\n # util.save_img(gt_img, save_img_path)\n #\n if suffix:\n save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '_LR.png')\n else:\n save_img_path = osp.join(dataset_dir, folder, img_name + '_LR.png')\n util.save_img(lr_img, save_img_path)\n #\n # if suffix:\n # save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '_LR_ref.png')\n # else:\n # save_img_path = osp.join(dataset_dir, folder, img_name + '_LR_ref.png')\n # util.save_img(lrgt_img, save_img_path)\n\n # calculate PSNR and SSIM\n gt_img = gt_img / 255.\n sr_img = sr_img / 255.\n\n lr_img = lr_img / 255.\n lrgt_img = lrgt_img / 255.\n\n crop_border = opt['crop_border'] if opt['crop_border'] else opt['scale']\n if crop_border == 0:\n cropped_sr_img = sr_img\n cropped_gt_img = gt_img\n else:\n cropped_sr_img = sr_img[crop_border:-crop_border, crop_border:-crop_border, :]\n cropped_gt_img = gt_img[crop_border:-crop_border, crop_border:-crop_border, :]\n\n psnr = util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)\n ssim = util.calculate_ssim(cropped_sr_img * 255, cropped_gt_img * 255)\n test_results['psnr'].append(psnr)\n test_results['ssim'].append(ssim)\n\n # PSNR and SSIM for LR\n psnr_lr = util.calculate_psnr(lr_img * 255, lrgt_img * 255)\n ssim_lr = util.calculate_ssim(lr_img * 255, lrgt_img * 255)\n test_results['psnr_lr'].append(psnr_lr)\n test_results['ssim_lr'].append(ssim_lr)\n\n if gt_img.shape[2] == 3: # RGB image\n sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n if crop_border == 0:\n cropped_sr_img_y = sr_img_y\n cropped_gt_img_y = gt_img_y\n else:\n cropped_sr_img_y = sr_img_y[crop_border:-crop_border, crop_border:-crop_border]\n cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n psnr_y = util.calculate_psnr(cropped_sr_img_y * 255, cropped_gt_img_y * 255)\n ssim_y = util.calculate_ssim(cropped_sr_img_y * 255, cropped_gt_img_y * 255)\n test_results['psnr_y'].append(psnr_y)\n test_results['ssim_y'].append(ssim_y)\n\n lr_img_y = bgr2ycbcr(lr_img, only_y=True)\n lrgt_img_y = bgr2ycbcr(lrgt_img, only_y=True)\n psnr_y_lr = util.calculate_psnr(lr_img_y * 255, lrgt_img_y * 255)\n ssim_y_lr = util.calculate_ssim(lr_img_y * 255, lrgt_img_y * 255)\n test_results['psnr_y_lr'].append(psnr_y_lr)\n test_results['ssim_y_lr'].append(ssim_y_lr)\n\n writer.writerow([osp.join(folder, img_name), psnr_y, psnr_y_lr, ssim_y, ssim_y_lr])\n logger.info(\n '{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}. LR PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'.\n format(osp.join(folder, img_name), psnr, ssim, psnr_y, ssim_y, psnr_lr, ssim_lr, psnr_y_lr, ssim_y_lr))\n else:\n writer.writerow([osp.join(folder, img_name), psnr, psnr_lr])\n logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}. 
LR PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(\n osp.join(folder, img_name), psnr, ssim, psnr_lr, ssim_lr))\n\n return test_results\n\n\n# options\nparser = argparse.ArgumentParser()\nparser.add_argument('-opt', type=str, required=True, help='Path to options YMAL file.')\nopt = option.parse(parser.parse_args().opt, is_train=False)\nopt = option.dict_to_nonedict(opt)\n\nutil.mkdirs(\n (path for key, path in opt['path'].items()\n if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))\nutil.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,\n screen=True, tofile=True)\nlogger = logging.getLogger('base')\nlogger.info(option.dict2str(opt))\n\n# Create test dataset and dataloader\ntest_loaders = []\nfor phase, dataset_opt in sorted(opt['datasets'].items()):\n test_set = create_dataset(dataset_opt)\n test_loader = create_dataloader(test_set, dataset_opt)\n logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))\n test_loaders.append(test_loader)\n\nmodel = create_model(opt)\nfor test_loader in test_loaders:\n test_set_name = test_loader.dataset.opt['name']\n logger.info('\\nTesting [{:s}]...'.format(test_set_name))\n test_start_time = time.time()\n dataset_dir = osp.join(opt['path']['results_root'], test_set_name)\n # util.mkdir(dataset_dir)\n\n test_results = OrderedDict()\n test_results['psnr'] = []\n test_results['ssim'] = []\n test_results['psnr_y'] = []\n test_results['ssim_y'] = []\n\n test_results['psnr_lr'] = []\n test_results['ssim_lr'] = []\n test_results['psnr_y_lr'] = []\n test_results['ssim_y_lr'] = []\n\n with open(osp.join(opt['path']['log'], 'test_' + opt['name'] + '_test.csv'), 'w') as f:\n writer = csv.writer(f)\n for data in test_loader:\n model.feed_data(data)\n if test_set_name == 'Vid4':\n folder = osp.split(osp.dirname(data['GT_path'][0][0]))[1]\n else:\n folder = ''\n util.mkdir(osp.join(dataset_dir, folder))\n\n model.test()\n visuals = model.get_current_visuals()\n\n if test_set_name == 'Vimeo90K':\n center = visuals['SR'].shape[0] // 2\n img_path = data['GT_path'][0]\n img_name = osp.splitext(osp.basename(img_path))[0]\n\n sr_img = util.tensor2img(visuals['SR']) # uint8\n gt_img = util.tensor2img(visuals['GT'][center]) # uint8\n lr_img = util.tensor2img(visuals['LR']) # uint8\n lrgt_img = util.tensor2img(visuals['LR_ref'][center]) # uint8\n\n test_results = cal_pnsr_ssim(sr_img, gt_img, lr_img, lrgt_img)\n\n else:\n t_step = visuals['SR'].shape[0]\n for i in range(t_step):\n img_path = data['GT_path'][i][0]\n img_name = osp.splitext(osp.basename(img_path))[0]\n\n sr_img = util.tensor2img(visuals['SR'][i]) # uint8\n gt_img = util.tensor2img(visuals['GT'][i]) # uint8\n lr_img = util.tensor2img(visuals['LR'][i]) # uint8\n lrgt_img = util.tensor2img(visuals['LR_ref'][i]) # uint8\n\n test_results = cal_pnsr_ssim(sr_img, gt_img, lr_img, lrgt_img)\n\n # Average PSNR/SSIM results\n ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])\n ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])\n\n ave_psnr_lr = sum(test_results['psnr_lr']) / len(test_results['psnr_lr'])\n ave_ssim_lr = sum(test_results['ssim_lr']) / len(test_results['ssim_lr'])\n\n logger.info(\n '----Average PSNR/SSIM results for {}----\\n\\tpsnr: {:.6f} db; ssim: {:.6f}. 
LR psnr: {:.6f} db; ssim: {:.6f}.\\n'.format(\n test_set_name, ave_psnr, ave_ssim, ave_psnr_lr, ave_ssim_lr))\n if test_results['psnr_y'] and test_results['ssim_y']:\n ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])\n ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])\n\n ave_psnr_y_lr = sum(test_results['psnr_y_lr']) / len(test_results['psnr_y_lr'])\n ave_ssim_y_lr = sum(test_results['ssim_y_lr']) / len(test_results['ssim_y_lr'])\n logger.info(\n '----Y channel, average PSNR/SSIM----\\n\\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}. LR PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.\\n'.\n format(ave_psnr_y, ave_ssim_y, ave_psnr_y_lr, ave_ssim_y_lr))\n", "sub_path": "codes/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 8426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.util.save_img", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.util.save_img", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.util.calculate_psnr", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 58, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 58, "usage_type": "name"}, {"api_name": "utils.util.calculate_psnr", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 63, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 64, "usage_type": "name"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 69, "usage_type": "call"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.util.calculate_psnr", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 78, "usage_type": "name"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 82, "usage_type": "call"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 83, "usage_type": "call"}, {"api_name": "utils.util.calculate_psnr", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 84, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "name"}, 
{"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 102, "usage_type": "call"}, {"api_name": "options.options.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "options.options", "line_number": 104, "usage_type": "name"}, {"api_name": "options.options.dict_to_nonedict", "line_number": 105, "usage_type": "call"}, {"api_name": "options.options", "line_number": 105, "usage_type": "name"}, {"api_name": "utils.util.mkdirs", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 107, "usage_type": "name"}, {"api_name": "utils.util.setup_logger", "line_number": 110, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 110, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 110, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 112, "usage_type": "call"}, {"api_name": "options.options.dict2str", "line_number": 113, "usage_type": "call"}, {"api_name": "options.options", "line_number": 113, "usage_type": "name"}, {"api_name": "data.create_dataset", "line_number": 118, "usage_type": "call"}, {"api_name": "data.create_dataloader", "line_number": 119, "usage_type": "call"}, {"api_name": "models.create_model", "line_number": 123, "usage_type": "call"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 143, "usage_type": "call"}, {"api_name": "data.util", "line_number": 144, "usage_type": "name"}, {"api_name": "data.util", "line_number": 145, "usage_type": "argument"}, {"api_name": "os.path.split", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 147, "usage_type": "call"}, {"api_name": "data.util", "line_number": 147, "usage_type": "name"}, {"api_name": "utils.util.mkdir", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "name"}, {"api_name": "data.util", "line_number": 157, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 158, "usage_type": "call"}, {"api_name": "utils.util.tensor2img", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 160, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 161, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 162, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 162, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 163, 
"usage_type": "call"}, {"api_name": "utils.util", "line_number": 163, "usage_type": "name"}, {"api_name": "data.util", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.util.tensor2img", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 173, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 174, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 175, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "228732805", "text": "#######################################################\n#################### IMPORT LIBRARY ####################\n########################################################\nimport bs4\nimport lxml\nimport numpy\nfrom pandas import *\nimport stop_words\nimport re\nimport time\nimport requests\nimport datetime\nimport os\nfrom selenium import webdriver\n\nbrowser = webdriver.Chrome(\"/Users/chou/Google Drive/websites/github/web_scraping/chromedriver\")\n\n\n###################################################\n#################### ARGUMENTS ####################\n###################################################\ninput_job = \"data scientist\"\ninput_quote = False\ninput_city = \"\"\ninput_state = \"NC\"\nsign_1 = \"-\"\nsign_2 = \"+\"\n\nBASE_URL_indeed = 'http://www.indeed.com'\nBASE_URL_monster = 'https://www.monster.com'\nBASE_URL_dice = 'https://www.dice.com'\nBASE_URL_careerbuilder = 'http://www.careerbuilder.com'\n\nclass syntax:\n def __init__(self, input, sign, quote = False):\n self.input = input\n self.sign = sign\n self.quote = quote\n\n def transform(self):\n syntax.output = self.input.replace(\" \", self.sign)\n if self.quote == True:\n syntax.output = ''.join(['\"', syntax.output, '\"'])\n return(syntax.output)\n\ndef basic_careerbuilder(BASE_URL, input_job, input_city, input_state, input_quote, sign_1, sign_2):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30'}\n if input_city != \"\":\n basic_url = [ BASE_URL, '/jobs-',\n syntax(input_job, sign_1, input_quote).transform(), '-in-', input_city,\n ',', input_state]\n basic_url = ''.join(basic_url)\n url_careerbuilder_list = [ basic_url, '?keywords=', syntax(input_job, sign_2, input_quote).transform(),\n '&location=', input_city, '%2C+', input_state ]\n url_careerbuilder = ''.join(url_careerbuilder_list)\n else:\n basic_url = [ BASE_URL, '/jobs-',\n syntax(input_job, sign_1, input_quote).transform(), '-in-',\n input_state]\n basic_url = ''.join(basic_url)\n url_careerbuilder_list = [ basic_url, '?keywords=', syntax(input_job, sign_2, input_quote).transform(),\n '&location=', input_state ]\n url_careerbuilder = ''.join(url_careerbuilder_list)\n\n print(url_careerbuilder)\n try:\n rawcode_careerbuilder = browser.get(url_careerbuilder) # timeout = 3, headers=headers\n #requests.get\n soup_careerbuilder = bs4.BeautifulSoup(browser.page_source, \"lxml\") #rawcode_careerbuilder.text\n except requests.exceptions.Timeout:\n pass\n\n 
browser.close()\n num_total_careerbuilder = soup_careerbuilder.find(\n 'div', {'class' : 'count'}).contents[0]\n num_total_careerbuilder = int(re.sub('[\\(\\)\\{\\}<>]', '',\n num_total_careerbuilder).split()[0])\n print(num_total_careerbuilder)\n num_pages_careerbuilder = int(numpy.ceil(num_total_careerbuilder/25.0))\n print(num_pages_careerbuilder)\n\n job_df_careerbuilder = pandas.DataFrame()\n for i in range(1, num_pages_careerbuilder+1):\n url = ''.join([basic_url,'?page_number=', str(i)])\n\n rawcode = requests.get(url, headers=headers)\n soup = bs4.BeautifulSoup(rawcode.text, \"lxml\")\n\n divs = soup.findAll(\"div\")\n job_divs = [jp for jp in divs if not jp.get('class') is None\n and 'job-row' in jp.get('class')]\n\n for job in job_divs:\n try:\n id = job.find('h2',{'class' : 'job-title'}).find('a').attrs['data-job-did']\n title = job.find('h2', {'class' : 'job-title'}).text.strip()\n company = job.find('div', {'class' : 'columns large-2 medium-3 small-12'}).find(\n 'h4', {'class': 'job-text'}).text.strip()\n location = job.find('div', {'class' : 'columns end large-2 medium-3 small-12'}).find(\n 'h4', {'class': 'job-text'}).text.strip()\n link = BASE_URL_careerbuilder + '/job/' + id\n except:\n continue\n\n job_df_careerbuilder = job_df_careerbuilder.append({'job_title': title,\n 'job_id': id,\n 'job_company': company,\n 'from':'Careerbuilder',\n 'job_location':location,\n 'job_link':link},ignore_index=True)\n cols=['from','job_id','job_title','job_company','job_location','job_link']\n job_df_careerbuilder = job_df_careerbuilder[cols] # reorder the columns of dataframe\n job_df_careerbuilder = job_df_careerbuilder.drop_duplicates(['job_link'], keep='first')\n return(job_df_careerbuilder)\n\njob_df_careerbuilder = basic_careerbuilder(BASE_URL_careerbuilder, input_job, input_city, input_state, input_quote,\n sign_1, sign_2)\nprint(job_df_careerbuilder.shape)\n\n##########################################################################\n#################### Job Info that I am interested in ####################\n##########################################################################\n##### Job types #####\ntype = ['Full-time', 'Part-time', 'Contractor', 'Contract', 'Full time', 'Part time']\ntype_lower = [s.lower() for s in type]\ntype_map = pandas.DataFrame({'raw':type, 'lower':type_lower})\ntype_dic = list(type_map.set_index('lower').to_dict().values()).pop()\n\n##### Skills #####\nskills = ['Scala', 'Ruby', 'C++', 'Perl', 'R', 'Java', 'Matlab', 'JavaScript',\n 'Python', 'SPSS', 'D3.js', 'Tableau', 'Excel', 'SAS', 'D3', 'Mahout',\n 'Hadoop', 'Pig', 'Spark', 'ZooKeeper', 'MapReduce', 'Shark', 'Hive',\n 'Oozie', 'Flume', 'HBase', 'Cassandra', 'NoSQL', 'SQL', 'MongoDB', 'GIS',\n 'AWS', 'Haskell', 'PHP', 'Perl', 'Stata', 'Shiny']\nskills_lower = [s.lower() for s in skills]\nskills_map = pandas.DataFrame({'raw':skills, 'lower':skills_lower})\nskills_dic = list(skills_map.set_index('lower').to_dict().values()).pop()\n\n##### Education #####\nedu = ['Bachelor', 'Master', 'PhD', 'MBA', 'M.S.', 'M.S', 'MS', 'Ph.D.', 'BS',\n \"Bachelor's\", \"Master's\", \"PhD's\"]\nedu_lower = [s.lower() for s in edu]\nedu_map = pandas.DataFrame({'raw':edu, 'lower':edu_lower})\nedu_dic = list(edu_map.set_index('lower').to_dict().values()).pop()\n\n##### Major ######\nmajor = ['Computer Science', 'Statistics', 'Mathematics', 'Math','Physics',\n 'Machine Learning','Economics','Software Engineering', 'Engineering',\n 'Information System', 'Quantitative Finance', 'Biostatistics', 'Bioinformatics',\n 'AI', 
'Artificial Intelligence']\nmajor_lower = [s.lower() for s in major]\nmajor_map = pandas.DataFrame({'raw':major, 'lower':major_lower})\nmajor_dic = list(major_map.set_index('lower').to_dict().values()).pop()\n\n##### Key Words ######\nkeywords = ['Web Analytics', 'Regression', 'Classification', 'User Experience', 'Big Data',\n 'Streaming Data', 'Real-Time Data', 'Real Time', 'Time Series']\nkeywords_lower = [s.lower() for s in keywords]\nkeywords_map = pandas.DataFrame({'raw':keywords, 'lower':keywords_lower})\nkeywords_dic = list(keywords_map.set_index('lower').to_dict().values()).pop()\n\n###############################################################\n#################### Function for Scraping ####################\n###############################################################\ndef scrape_job(link):\n required_type = []\n required_skills = []\n required_edu = []\n required_major = []\n required_keywords = []\n\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30'}\n job_page = requests.get(link, headers=headers)\n\n soup = bs4.BeautifulSoup(job_page.text, \"lxml\")\n for elem in soup.findAll(['script','style','head','title']):\n elem.extract()\n texts = soup.getText(separator=' ').lower()\n\n string = re.sub(r'\\,', ' ', texts) # remove \",\"\n # print(string.encode('utf-8'))\n string = re.sub('/', ' ', string) # remove \"/\"\n # print(string.encode('utf-8'))\n string = re.sub(r'\\(', ' ', string) # remove \"(\"\n # print(string.encode('utf-8'))\n string = re.sub(r'\\)', ' ', string) # remove \")\"\n # print(string.encode('utf-8'))\n string = re.sub(r'[\\n\\r\\t]', ' ', string) # remove \"\\n\", \"\\r\", \"\\t\"\n # print(string.encode('utf-8'))\n string = re.sub(' +',' ',string) # remove more than one space\n string = re.sub(r'r\\s&\\sd', ' ', string) # avoid picking 'r & d'\n string = re.sub(r'r&d', ' ', string) # avoid picking 'r&d'\n # print(string.encode('utf-8'))\n\n for typ in type_lower :\n if any(x in typ for x in ['+', '#', '.']):\n typp = re.escape(typ)\n else:\n typp = typ\n result = re.search(r'(?:^|(?<=\\s))' + typp + r'(?=\\s|$)', string)\n if result:\n required_type.append(typ)\n\n for sk in skills_lower :\n if any(x in sk for x in ['+', '#', '.']):\n skk = re.escape(sk)\n else:\n skk = sk\n result = re.search(r'(?:^|(?<=\\s))' + skk + r'(?=\\s|$)',string)\n if result:\n required_skills.append(sk)\n\n for ed in edu_lower :\n if any(x in ed for x in ['+', '#', '.']):\n edd = re.escape(ed)\n else:\n edd = ed\n result = re.search(r'(?:^|(?<=\\s))' + edd + r'(?=\\s|$)', string)\n if result:\n required_edu.append(ed)\n\n for maj in major_lower :\n if any(x in maj for x in ['+', '#', '.']):\n majj = re.escape(maj)\n else:\n majj = maj\n result = re.search(r'(?:^|(?<=\\s))' + majj + r'(?=\\s|$)', string)\n if result:\n required_major.append(maj)\n\n for key in keywords_lower :\n if any(x in key for x in ['+', '#', '.']):\n keyy = re.escape(key)\n else:\n keyy = key\n result = re.search(r'(?:^|(?<=\\s))' + keyy + r'(?=\\s|$)', string)\n if result:\n required_keywords.append(key)\n\n except:\n required_type = 'Forbidden'\n required_skills = 'Forbidden'\n required_edu = 'Forbidden'\n required_major = 'Forbidden'\n required_keywords = 'Forbidden'\n # continue\n\n all_job = {'type':required_type, 'skills':required_skills, 'edu':required_edu,\n 'major':required_major, 'keywords':required_keywords}\n return(all_job)\n\nlks = job_df_careerbuilder['job_link']\nll = [link for link in lks]\n# 
print(len(ll))\n\nimport multiprocessing as mp\n# print(mp.cpu_count()) #4\nif __name__ == '__main__':\n pool = mp.Pool(processes = 8)\n results = pool.map(scrape_job, ll)\n pool.close()\n pool.join()\n\n# print(results)\n# print(len(results))\n\njob_type = [d['type'] for d in results]\njob_skills = [d['skills'] for d in results]\njob_edu = [d['edu'] for d in results]\njob_major = [d['major'] for d in results]\njob_keywords = [d['keywords'] for d in results]\n\njob_df_careerbuilder['job_type'] = job_type\njob_df_careerbuilder['job_skills'] = job_skills\njob_df_careerbuilder['job_edu'] = job_edu\njob_df_careerbuilder['job_major'] = job_major\njob_df_careerbuilder['job_keywords'] = job_keywords\nprint(job_df_careerbuilder.shape)\n\nnow = datetime.datetime.now()\nnow_str = now.strftime(\"%m%d%Y\")\ndir_str = '/Users/chou/Google Drive/websites/github/web_scraping/data/' + now_str + '/'\n\nif not os.path.exists(dir_str):\n os.makedirs(dir_str)\n\njob_df_careerbuilder.to_csv(dir_str + input_job +'_job_df_careerbuilder.csv')\n", "sub_path": "careerbuilder_fast.py", "file_name": "careerbuilder_fast.py", "file_ext": "py", "file_size_in_byte": 11981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 70, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 86, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 136, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 174, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 176, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 181, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 183, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 185, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 187, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 189, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 191, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 192, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 193, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 198, "usage_type": "call"}, {"api_name": "re.search", "line_number": 201, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 207, "usage_type": "call"}, {"api_name": "re.search", "line_number": 210, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 216, "usage_type": "call"}, {"api_name": "re.search", "line_number": 219, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 225, "usage_type": "call"}, {"api_name": "re.search", "line_number": 228, "usage_type": "call"}, 
{"api_name": "re.escape", "line_number": 234, "usage_type": "call"}, {"api_name": "re.search", "line_number": 237, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 260, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path", "line_number": 285, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 286, "usage_type": "call"}]} +{"seq_id": "296929602", "text": "\"\"\"Animation of the trajectories of artists using matplotlib.\nThe Animator class takes a tuple of pairs, each pair corresponds to an\nartist. Each pair (artist) contains the trajectory, as a list, in the\nx and y directions, respectively. Both lists are parametrized using the\nsame variable (typically time).\nThe trajectory followed by the artists is assumed to be in two spatial\ndimensions.\nWith this tuple of tuples the class is capable of showing an animation\nof all of the artists in a frame set using matplotlib's pyplot. This\nclass is very useful when animating physical systems such as planets,\nballs, charges, etc. Specially if you do not want to worry about little\ndetails like axes, titles, labels, etc.\nFor more details on how to use matplotlib to do animations, visit\nhttps://matplotlib.org/api/animation_api.html\nClasses\n-------\nAnimator : Sets, runs or saves animations of a tuple of artists.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nclass Animator:\n \"\"\"Set, run or save animations of artists given their trajectories.\n Attributes\n ----------\n artists : tuple of pairs\n Time step of the integration. It does not only mean time, it is\n just the independent variable of the differential equation.\n art_num : int\n Number of artists. It is the len(self.artists).\n fig : matplotlib.figure.Figure\n Figure that will frame the animation.\n ax : matplotlib.axes._subplots.AxesSubplot\n Axes related to self.fig.\n lines : list of matplotlib.lines.Line2D\n Trajectories or lines to be drawn, one for each artist.\n points : list of matplotlib.lines.Line2D\n The beginning of the trajectory of each artist is represented\n with a point in the Figure. Each pair of lists contains only\n one data in each list.\n time_template : str\n Template that saves the current time of the simulation. It is\n passed over to self.time_text so it can be printed in the\n Figure. 
It specifies the format in which the time will be printed.\n time_text : matplotlib.text.Text\n Text that will show the current time of the simulation in the\n Figure using the information provided by self.time_template.\n \"\"\"\n\n def __init__(self, objs): # file=None\n \"\"\"Construct an Animator instance given a tuple of artists.\n objs - tuple of pairs to be drawn (artists trajectories).\n \"\"\"\n self.artists = objs\n self.art_num = len(objs)\n self.fig = self.ax = None\n self.lines, self.points = [], []\n self.time_template = self.time_text = None\n\n def setup_anime(self, xmin_off=0, ymin_off=0, xmax_off=0, ymax_off=0):\n \"\"\"Set up the animation.\n xmin_off - offset for the xmin limit calculated below.\n ymin_off - offset for the ymin limit calculated below.\n xmax_off - offset for the xmax limit calculated below.\n ymax_off - offset for the ymax limit calculated below.\n First, it finds out the limits of the Figure, setting up the\n figure, axes, background color of plot, etc.\n Second, sets up the color for the trajectory of each artist and\n appends the plot line to self.lines. Then, something similar is\n done for self.points.\n Finally, the time_template is defined and the text that will\n print the current time is set.\n \"\"\"\n xtremes = [(min(x), min(y), max(x), max(y)) for x, y in self.artists]\n xmin = min(map(lambda lst: lst[0], xtremes)) + xmin_off\n ymin = min(map(lambda lst: lst[1], xtremes)) + ymin_off\n xmax = max(map(lambda lst: lst[2], xtremes)) + xmax_off\n ymax = max(map(lambda lst: lst[3], xtremes)) + ymax_off\n print(\"Xtremes:\", xmin, xmax, ymin, ymax)\n\n self.fig = plt.figure()\n self.ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin, ymax),\n autoscale_on=False)\n self.ax.set_facecolor('k')\n self.ax.set(xlabel='x [a.u.]', ylabel='y [a.u.]',\n title='Projectile motion')\n self.ax.set_aspect('equal')\n self.ax.grid()\n\n for a in range(self.art_num):\n ln, = self.ax.plot([], [], '--')\n ln.set_clip_on(False)\n self.lines.append(ln)\n\n plt.gca().set_prop_cycle(None)\n\n for a in range(self.art_num):\n pt, = self.ax.plot([], [], 'o')\n pt.set_clip_on(False)\n self.points.append(pt)\n\n self.time_template = 'time = %d a.u.'\n self.time_text = self.ax.text(.5, .5, '', color='c',\n transform=self.ax.transAxes,\n horizontalalignment='center',\n verticalalignment='center')\n\n def init_anime(self):\n \"\"\"Initialize animation, used to draw a clear frame.\n It will be passed over to the parameter init_func defined in\n matplotlib.animation.FuncAnimation.\n \"\"\"\n for a in range(self.art_num):\n self.lines[a].set_data([], [])\n self.points[a].set_data([], [])\n self.time_text.set_text('')\n return self.lines + self.points + [self.time_text]\n\n def animate(self, idx):\n \"\"\"Initialize animation, used to draw a clear frame.\n idx - argument will be the next value in frames.\n It will be passed over as the function to call at each frame\n defined as func in matplotlib.animation.FuncAnimation.\n \"\"\"\n for a in range(self.art_num):\n if idx < len(self.artists[a][0]):\n xc, yc = self.artists[a][0][idx], self.artists[a][1][idx]\n self.lines[a].set_data(self.artists[a][0][:idx],\n self.artists[a][1][:idx])\n self.points[a].set_data(xc, yc)\n self.time_text.set_text(self.time_template % idx)\n return self.lines + self.points + [self.time_text]\n\n def run_anime(self, inval=10, rep=True, blitit=False):\n \"\"\"Invoke matplotlib.animation.FuncAnimation and display animation.\n inval - delay between frames in milliseconds (default 200).\n rep - whether to repeat the 
animation in repeated (default True).\n blitit - controls whether blitting is used to optimize drawing\n (default False).\n \"\"\"\n ani = animation.FuncAnimation(self.fig, self.animate,\n len(self.artists[0][0]), repeat=rep,\n interval=inval, blit=blitit,\n init_func=self.init_anime)\n plt.show()\n\n def save_anime(self, filename, inval=10, rep=True, blitit=False):\n \"\"\"Invoke matplotlib.animation.FuncAnimation and save animation.\n inval - delay between frames in milliseconds (default 200).\n rep - whether to repeat the animation in repeated (default True).\n blitit - controls whether blitting is used to optimize drawing\n (default False).\n Notice that the animation is saved using imagemagick; however,\n other writers can be used. Available writers can be found calling\n animation.writers.list().\n \"\"\"\n print(animation.writers.list())\n ani = animation.FuncAnimation(self.fig, self.animate,\n len(self.artists[0][0]), repeat=rep,\n interval=inval, blit=blitit,\n init_func=self.init_anime)\n ani.save(filename, writer='imagemagick', fps=inval)\n\n\nif __name__ == \"__main__\":\n anime = Animator((([0, 2, 4, 6], [-5, 0, 5, 10]),\n ([0, 1, 2, 3], [0, -1, -2, -3]),\n ([1, 2, 3, 4], [2, 4, 6, 8]),\n ([2, 3, 4, 5], [4, 9, 16, 25])))\n anime.setup_anime()\n anime.run_anime(inval=1000, rep=True)\n", "sub_path": "animator/animator.py", "file_name": "animator.py", "file_ext": "py", "file_size_in_byte": 7898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.animation.writers.list", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.animation.writers", "line_number": 160, "usage_type": "attribute"}, {"api_name": "matplotlib.animation", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 161, "usage_type": "name"}]} +{"seq_id": "518050852", "text": "import numpy as np\nfrom scipy import sparse\nfrom abc import ABCMeta, abstractmethod\nfrom nltk.translate.bleu_score import sentence_bleu\n\nfrom utils.graph import Graph, INPUTS_NAME, GRADIENTS_NAME\nfrom neural_network.utils import InvalidShapeError, ModelArchitectureError, onehot_encode\nfrom .layers.core import Layer\nfrom .losses import Loss, CrossEntropyLoss, MeanSquaredLoss\nfrom .optimizers import Optimizer, Adam, SGD\n\n# loss functions\nMEAN_SQUARED = 'mse'\nCROSS_ENTROPY = 'crossentropy'\n\n# optimizers\nADAM = 'adam'\nSGD = 'sgd'\n\n# encoder-decoder input/output names\nENCODER_INPUTS_NAME = 'encoder_inputs'\nENCODER_GRADIENTS_NAME = 'encoder_gradients'\nDECODER_INPUTS_NAME = 
'decoder_inputs'\nDECODER_GRADIENTS_NAME = 'decoder_gradients'\n\n# used when models call self.evaluate within self.fit\n# saves on splitting the data into batches again\nINPUT_IN_BATCHES = 'batched'\n\nclass Model(metaclass = ABCMeta):\n \"\"\"Abstract model class.\n \"\"\"\n def __init__(self):\n self._compiled = False\n self._optimizer = None\n self._loss = None \n\n def compile(self, optimizer, loss):\n self.optimizer = optimizer\n self._optimizer = self._get_optimizer(optimizer)\n self.loss = loss\n self._loss = self._get_loss(loss)\n self._compiled = True\n\n @property\n def layers(self): pass\n\n @abstractmethod\n def _define_graph(self, residual_connections = []):\n pass\n\n def _get_optimizer(self, optimizer):\n if isinstance(optimizer, str):\n if optimizer == ADAM:\n return Adam()\n elif optimizer == SGD:\n from . import optimizers; return optimizers.SGD() # the module-level name SGD holds the string 'sgd', not the optimizer class\n raise ValueError(f'Optimizer of type {optimizer} not recognized. '\n f'Choose between Adam optimizer(\\'{ADAM}\\') '\n f'and stochastic gradient descent(\\'{SGD}\\')')\n \n elif isinstance(optimizer, Optimizer):\n return optimizer\n \n else:\n raise ValueError('Invalid optimizer. Please pass an object which inherits '\n 'the Optimizer class, or name of optimizer as string. '\n f'Supported optimizers: ({ADAM}, {SGD}).')\n \n def _get_loss(self, loss):\n if isinstance(loss, str):\n if loss == MEAN_SQUARED:\n return MeanSquaredLoss()\n elif loss == CROSS_ENTROPY:\n return CrossEntropyLoss()\n raise ValueError(f'Loss of type {loss} not recognized. '\n f'Choose between mean squared loss(\\'{MEAN_SQUARED}\\') '\n f'and cross-entropy loss(\\'{CROSS_ENTROPY}\\')')\n \n elif isinstance(loss, Loss):\n return loss\n \n else:\n raise ValueError('Invalid loss function. Please pass an object which inherits the Loss class, '\n 'or name of loss function as string. Supported loss functions: '\n f'({MEAN_SQUARED}, {CROSS_ENTROPY}).')\n\n def _set_names(self):\n nums = dict()\n for layer in self.layers:\n prefix = type(layer).__name__.lower()\n nums[prefix] = nums[prefix] + 1 if prefix in nums else 1\n \n layer._name = f'{prefix}_{nums[prefix]}'\n\nclass Sequential(Model):\n \"\"\"Linear stack of layers.\n \"\"\"\n def __init__(self, layers):\n super().__init__()\n if any([not isinstance(layer, Layer) for layer in layers]):\n raise TypeError('The added layer must be an instance of class Layer.')\n self.__layers = list(layers)\n \n self._set_names()\n self._define_graph()\n\n @property\n def layers(self): return self.__layers\n \n def fit(self, X, y, batch_size = 32, epochs = 1, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. 
'\n 'Use `model.compile(optimizer, loss)`.')\n \n n_samples = X.shape[0]\n self.__labels = list(set(y))\n y_onehot = onehot_encode(y) if len(self.__labels) > 2 else y.reshape(-1, 1)\n X_batches = [X[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n y_batches = [y_onehot[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n \n for it in range(epochs):\n for batch_index in range(len(X_batches)):\n y_batch = y_batches[batch_index]\n\n # y_batch would be a sparse matrix if number of labels > 2 and y was onehot encoded\n if isinstance(y_batch, sparse.csr_matrix):\n y_batch = y_batch.toarray()\n\n self._forward(X_batches[batch_index], train_mode = True)\n self._backward(X_batches[batch_index], y_batch, train_mode = True)\n \n print(f'Epoch {it + 1}:')\n loss, accuracy = self.evaluate(X_batches, y_batches, batch_size, **{INPUT_IN_BATCHES: True})\n print(f'Training loss: {loss}, training accuracy: {accuracy}')\n\n self._optimizer.decay_lr()\n\n def evaluate(self, X, y, batch_size = 32, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. '\n 'Use `model.compile(optimizer, loss)`.')\n\n if INPUT_IN_BATCHES in kwargs and kwargs[INPUT_IN_BATCHES] == True:\n X_batches = X\n y_oh_batches = y\n\n # y_batches elements would be sparse matrices if number of labels > 2 and y was onehot encoded\n if isinstance(y_oh_batches[0], sparse.csr_matrix):\n y_batches = [oh_batch.toarray().argmax(axis=1).reshape(-1, 1) for oh_batch in y_oh_batches]\n else:\n y_batches = y\n else:\n n_samples = X.shape[0]\n X_batches = [X[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n y_batches = [y[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n if len(self.__labels) > 2:\n y_oh_batches = [onehot_encode(batch, num_labels=len(self.__labels)) for batch in y_batches]\n else:\n y_oh_batches = y_batches\n\n loss = 0\n accuracy = 0\n n_batches = len(X_batches)\n \n for batch_index in range(n_batches):\n X_batch = X_batches[batch_index]\n y_batch = y_batches[batch_index].reshape(-1, 1)\n\n onehot_batch = y_oh_batches[batch_index]\n\n # onehot_batch would be a sparse matrix if number of labels > 2 and y was onehot encoded\n if isinstance(onehot_batch, sparse.csr_matrix):\n onehot_batch = onehot_batch.toarray()\n\n self._forward(X_batch, train_mode = False)\n activations = self.layers[-1].activations\n \n # if there is more than one activation per sample, then the labels were onehot encoded \n if self.layers[-1].activations.shape[-1] == 1:\n y_batch = y_batch\n current_loss = self._loss.get_loss(y_batch, activations)\n predictions = np.array([self.__labels[int(np.round(activation))] for activation in activations]).reshape((activations.shape[0], -1))\n else:\n current_loss = self._loss.get_loss(onehot_batch, activations)\n predictions = np.array([self.__labels[np.argmax(activation)] for activation in activations]).reshape((activations.shape[0], -1))\n loss += current_loss\n diff = y_batch - predictions\n accuracy += 1 - (np.count_nonzero(diff) / len(y_batch))\n\n loss /= n_batches\n accuracy /= n_batches\n return loss, accuracy\n \n def _forward(self, X_batch, train_mode = True, *args, **kwargs):\n self._graph.forward(inputs_dict={INPUTS_NAME: X_batch},\n additional_params={'train_mode': train_mode},\n *args,\n **kwargs)\n\n outputs = self._graph.outputs\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n \n def _backward(self, X_batch, y_batch, train_mode = True, *args, **kwargs):\n delta = self._loss.output_deriv(y = 
self.layers[-1].activations, t = y_batch)\n\n self._graph.backward(gradients_dict={GRADIENTS_NAME: delta},\n additional_params = {'train_mode': train_mode},\n node_func=self._optimizer.update_weights,\n *args,\n **kwargs)\n\n def _define_graph(self, residual_connections = []):\n self.layers[0].input_edges[INPUTS_NAME] = None\n self.layers[-1].output_edges[GRADIENTS_NAME] = None\n node_connections = [(self.layers[idx], self.layers[idx + 1], (INPUTS_NAME, GRADIENTS_NAME)) for idx in range(0, len(self.layers) - 1)]\n node_connections += residual_connections\n\n self._graph = Graph(node_connections)\n\nclass EncoderDecoder(Model):\n def __init__(self,\n encoder_layers,\n decoder_layers,\n link_layers,\n start_of_sequence_token_id,\n end_of_sequence_token_id,\n padding_token_id = 0):\n \"\"\"Abstract encoder-decoder architecture for sequence models.\n\n Parameters: \n 'encoder_layers' - a list of layers that will comprise the encoder\n 'decoder_layers' - a list of layers that will comprise the decoder\n 'link_layers' - a list of layers linking the encoder and the decoder\n 'start_of_sequence_token_id' - the id of the start of sequence token used\n 'end_of_sequence_token_id' - the id of the end of sequence token used\n 'padding_token_id' - id of the sequence padding token used\n\n Warning: All layers in 'link_layers' must be a part of the 'decoder_layers' list and must be able to accept two inputs (from the decoder and the encoder).\n \"\"\"\n super().__init__()\n\n if any([ll not in decoder_layers for ll in link_layers]):\n raise ModelArchitectureError('\\'link_layers\\' must be a part of the \\'decoder_layers\\' list.')\n\n self.encoder_layers = encoder_layers\n self.decoder_layers = decoder_layers\n self.link_layers = link_layers\n self.start_of_sequence_token_id = start_of_sequence_token_id\n self.end_of_sequence_token_id = end_of_sequence_token_id\n self.padding_token_id = padding_token_id\n\n self._set_names()\n self._define_graph()\n\n @property\n def layers(self): return self.encoder_layers + self.decoder_layers\n\n def fit(self, encoder_inputs, decoder_inputs, batch_size = 32, epochs = 1, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. 
'\n 'Use `model.compile(optimizer, loss)`.')\n\n ## merge batch_size and sequence length dimensions into one\n #encoder_inputs_flat = encoder_inputs.ravel()#encoder_inputs.reshape((sum(encoder_inputs.shape[:2]),) + encoder_inputs.shape[2:])\n #decoder_inputs_flat = decoder_inputs.ravel()#decoder_inputs.reshape((sum(decoder_inputs.shape[:2]),) + decoder_inputs.shape[2:])\n ##y_reshaped = y.reshape((-1,))\n n_samples = encoder_inputs.shape[0]\n \n self.__labels = list([0]*3459)#list(set(decoder_inputs))\n\n encoder_batches = [encoder_inputs[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n decoder_batches = [decoder_inputs[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n #y_batches = [y_reshaped[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n \n for it in range(epochs):\n for batch_index in range(len(encoder_batches)):\n self._forward(encoder_inputs = encoder_batches[batch_index],\n decoder_inputs = decoder_batches[batch_index],\n train_mode=True)\n self._backward(encoder_inputs = encoder_batches[batch_index],\n decoder_inputs = decoder_batches[batch_index],\n y_batch = onehot_encode(decoder_batches[batch_index],\n num_labels=len(self.__labels))\\\n .toarray()\\\n .reshape(decoder_batches[batch_index].shape + (len(self.__labels),)),\n train_mode = True)\n \n print(f'Epoch {it + 1}:')\n loss, bleu = self.evaluate(encoder_inputs=encoder_batches, y=decoder_batches, **{INPUT_IN_BATCHES: True})\n print(f'Training loss: {loss}, training BLEU score: {bleu}')\n\n self._optimizer.decay_lr()\n\n def evaluate(self, encoder_inputs, y, batch_size = 32, epochs = 1, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. '\n 'Use `model.compile(optimizer, loss)`.')\n\n if INPUT_IN_BATCHES in kwargs and kwargs[INPUT_IN_BATCHES] == True:\n encoder_batches = encoder_inputs\n y_batches = y\n else:\n ## merge batch_size and sequence length dimensions into one\n #encoder_inputs = encoder_inputs.ravel()\n n_samples = encoder_inputs.shape[0]\n\n encoder_batches = [encoder_inputs[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n y_reshaped = y.reshape((-1,))\n y_batches = [y_reshaped[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n\n loss = 0\n bleu_score = 0\n n_batches = len(encoder_batches)\n \n for batch_index in range(n_batches):\n encoder_batch = encoder_batches[batch_index]\n y_batch = y_batches[batch_index]\n onehot_batch = onehot_encode(y_batch, num_labels=len(self.__labels)).toarray()\n\n self._graph.clear_messages()\n self._forward(encoder_inputs=encoder_batch, decoder_inputs=y_batch, train_mode = False)\n activations = self.decoder_layers[-1].activations\n \n if self.layers[-1].activations.shape[-1] == 1:\n current_loss = self._loss.get_loss(y_batch.reshape(-1, 1), activations)\n hypotheses = np.array([self.__labels[int(np.round(activation))] for activation in activations])\n else:\n current_loss = self._loss.get_loss(onehot_batch, activations)\n hypotheses = np.array([self.__labels[np.argmax(sentence)] for activation in activations for sentence in activation]).reshape(activations.shape[:-1])\n loss += current_loss\n bleu_score += np.mean([sentence_bleu([y_batch[idx]], hypotheses[idx]) for idx in range(len(y_batch))])\n\n\n loss /= n_batches\n bleu_score /= n_batches\n return loss, bleu_score\n\n def _forward(self, encoder_inputs, decoder_inputs, train_mode = True, *args, **kwargs):\n if train_mode:\n self._graph.forward(inputs_dict={ENCODER_INPUTS_NAME: encoder_inputs, 
DECODER_INPUTS_NAME: decoder_inputs},\n additional_params={'train_mode': train_mode})\n\n return self._graph.outputs[0]\n else:\n outputs_all = []\n for sequence_index in range(encoder_inputs.shape[0]):\n sequence_encoder_inputs = encoder_inputs[sequence_index][None]\n \n decoder_inputs = np.hstack([np.array([[self.start_of_sequence_token_id]]), np.ones((1, encoder_inputs.shape[1] - 1)) * self.padding_token_id])\n \n self._graph.forward(inputs_dict={ENCODER_INPUTS_NAME: sequence_encoder_inputs, DECODER_INPUTS_NAME: decoder_inputs},\n additional_params={'train_mode': train_mode})\n\n # \"freeze\" encoder layers so the graph skips them in the forward iteration as they have already done their computations\n for layer in self.encoder_layers:\n layer.frozen = True\n\n output_index = 1\n outputs = self._graph.outputs[0]#.reshape(sequence_encoder_inputs.shape + (self._graph.outputs[0].shape[-1],))\n\n # recompute decoder outputs until it predicts an end-of-sequence token at element output_index\n # each time adding the output at index output_index to the decoder input\n while outputs[0][output_index].argmax() != self.end_of_sequence_token_id and output_index < encoder_inputs.shape[1] - 1:\n decoder_inputs[0, output_index] = outputs[0][output_index].argmax()\n\n # clear messages from decoder layers as they will have to be recomputed\n for layer in self.decoder_layers:\n layer.clear_child_edges()\n\n self._graph.forward(inputs_dict={DECODER_INPUTS_NAME: decoder_inputs},\n additional_params={'train_mode': train_mode})\n output_index += 1\n\n outputs_all.append(self._graph.outputs[0])\n\n # \"unfreeze\" encoder layers (see reasons for freezing above)\n for layer in self.encoder_layers:\n layer.frozen = False\n\n self._graph.clear_messages()\n\n return np.array([out[0] for out in outputs_all])\n\n def _backward(self, encoder_inputs, decoder_inputs, y_batch, train_mode = True, *args, **kwargs):\n activations = self.decoder_layers[-1].activations\n delta = self._loss.output_deriv(y = activations, t = y_batch)\n\n self._graph.backward(gradients_dict={GRADIENTS_NAME: delta},\n additional_params = {ENCODER_INPUTS_NAME: encoder_inputs,\n DECODER_INPUTS_NAME: decoder_inputs,\n 'train_mode': train_mode},\n node_func=self._optimizer.update_weights,\n *args,\n **kwargs)\n\n def _define_graph(self, residual_connections = []):\n self.encoder_layers[0].input_edges[ENCODER_INPUTS_NAME] = None\n self.decoder_layers[0].input_edges[DECODER_INPUTS_NAME] = None\n self.decoder_layers[-1].output_edges[GRADIENTS_NAME] = None\n\n enco_conn = [(self.encoder_layers[idx], self.encoder_layers[idx + 1], (INPUTS_NAME, GRADIENTS_NAME)) for idx in range(0, len(self.encoder_layers) - 1)]\n deco_conn = [(self.decoder_layers[idx], self.decoder_layers[idx + 1], (INPUTS_NAME, GRADIENTS_NAME)) for idx in range(0, len(self.decoder_layers) - 1)]\n link_conn = [(self.encoder_layers[-1], link, (ENCODER_INPUTS_NAME, GRADIENTS_NAME)) for link in self.link_layers]\n node_connections = enco_conn + deco_conn + link_conn + residual_connections\n\n self._graph = Graph(node_connections)\n\n", "sub_path": "neural_network/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 19457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "optimizers.SGD", "line_number": 18, "usage_type": "name"}, {"api_name": "abc.ABCMeta", "line_number": 30, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 48, "usage_type": "name"}, {"api_name": 
"optimizers.Adam", "line_number": 55, "usage_type": "call"}, {"api_name": "optimizers.SGD", "line_number": 56, "usage_type": "name"}, {"api_name": "optimizers.SGD", "line_number": 57, "usage_type": "call"}, {"api_name": "optimizers.SGD", "line_number": 60, "usage_type": "name"}, {"api_name": "optimizers.Optimizer", "line_number": 62, "usage_type": "argument"}, {"api_name": "optimizers.SGD", "line_number": 68, "usage_type": "name"}, {"api_name": "losses.MeanSquaredLoss", "line_number": 73, "usage_type": "call"}, {"api_name": "losses.CrossEntropyLoss", "line_number": 75, "usage_type": "call"}, {"api_name": "losses.Loss", "line_number": 80, "usage_type": "argument"}, {"api_name": "layers.core.Layer", "line_number": 101, "usage_type": "argument"}, {"api_name": "layers.core", "line_number": 101, "usage_type": "name"}, {"api_name": "layers.core", "line_number": 103, "usage_type": "argument"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 118, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 127, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 127, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 149, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 149, "usage_type": "name"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 158, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 173, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 196, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 210, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 217, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 218, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 219, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 219, "usage_type": "name"}, {"api_name": "utils.graph.Graph", "line_number": 222, "usage_type": "call"}, {"api_name": "neural_network.utils.ModelArchitectureError", "line_number": 247, "usage_type": "call"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 286, "usage_type": "call"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 335, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.sentence_bleu", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 386, "usage_type": "call"}, 
{"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 392, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 403, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 405, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 405, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 406, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 406, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 407, "usage_type": "name"}, {"api_name": "utils.graph.Graph", "line_number": 410, "usage_type": "call"}]} +{"seq_id": "14428372", "text": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport sys\nsys.path.append('..')\nfrom models import *\nfrom utils import Logger\nimport numpy as np\nimport progressbar\nimport time\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch CIFAR model Train')\nparser.add_argument('--batch-size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=512, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate (default: 0.1)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--alpha', type=float, default=2.0, help='Orthogonality of the weight') # 2.0-7.5\nparser.add_argument('--delta', type=float, default=0.1, help='change of args of weight') # 0.08 0.1 0.12\n# delta 0.08-4 0.1-6 0.12-7\nargs = parser.parse_args()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\nelse:\n print(\"No cuda participate.\")\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntest_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('../data', train=False, transform=transforms.Compose([transforms.ToTensor(),])),\n batch_size=args.test_batch_size, shuffle=False, **kwargs)\n\n\nmodel = Lsoftmax_VGG16(margin=1)\nif args.cuda:\n model.cuda()\nfilename = '../checkpoint/vgg16/vgg16_lsoftmax.pth'\ncheckpoint = torch.load(filename)\nmodel.load_state_dict(checkpoint)\n\nwith torch.no_grad():\n for name, parameters in model.named_parameters():\n print(name,':',parameters.size())\n if name == 'lsoftmax_linear.weight': weight = parameters.cpu().numpy()\n\n\ndef inference():\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n # bar = progressbar.ProgressBar(max_value=10000//args.test_batch_size + 1)\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, 
target = Variable(data), Variable(target)\n _, output = model(data)\n test_loss += F.cross_entropy(output, target, reduction='sum').item()\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n # bar.update(bi)\n # bar.finish()\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * float(correct) / len(test_loader.dataset)))\n print(f'weight_size: {weight.shape}')\n np.save('../weight_vector/vgg16/weight_lsoftmax', weight)\n\nif __name__=='__main__':\n inference()", "sub_path": "lab_vgg16/get_weight.py", "file_name": "get_weight.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "35034945", "text": "#!/usr/bin/env python3\nfrom unittest import TestCase, mock\nimport pytest\nimport mailroom_v4\nimport unittest\nfrom io import StringIO\nfrom testfixtures import tempdir, compare\nimport os\n\n\n@pytest.mark.parametrize('name, amount, expected', [\n (\"dan\", '50', \"Thank you dan for donating 50 dollars generously.\"),\n (\"jeff\", '60', \"Thank you jeff for donating 60 dollars generously.\")\n])\ndef test_thank_you_letter_positive(name, amount, expected):\n result = str(mailroom_v4.thank_you_letter(name, amount))\n assert expected == result\n\n\n@pytest.mark.parametrize('name, amount, expected', [\n (\"dan\", 50, \"Thank you sam for donating 50 dollars generously.\"),\n (\"jeff\", 60, \"Thank you for donating 60 dollars generously.\")\n])\ndef test_thank_you_letter_negitive(name, amount, expected):\n result = 
str(mailroom_v4.thank_you_letter(name, amount))\n assert expected != result\n\n\ntesting_donors_data = {\"testname1\": [200, 20, 35.5],\n \"testname2\": [500, 20],\n \"Susan\": [1000, 20, 70],\n \"Rob\": [250, 20],\n }\n\n\n@unittest.mock.patch('mailroom_v4.donor_details')\ndef test_donor_details(mock_donor_details):\n mailroom_v4.donor_details(testing_donors_data)\n mock_donor_details.assert_called_with(testing_donors_data)\n\n\ndef test_amount_validate_positive():\n assert mailroom_v4.amount_validate(float(20))\n\n\n@unittest.mock.patch('mailroom_v4.amount_validate')\ndef test_amount_validate_negitive(mock_amount_validate):\n mailroom_v4.amount_validate(-10)\n mock_amount_validate.assert_called_with(-10)\n\n\n@unittest.mock.patch('mailroom_v4.update_data_print_thanks')\ndef test_thank_you(mock_update_data_print_thanks):\n mailroom_v4.update_data_print_thanks(float(10), \"name1\")\n assert mock_update_data_print_thanks.called\n\n\n@unittest.mock.patch('sys.stdout', new_callable=StringIO)\ndef test_create_report(mock_stdout,):\n mailroom_v4.create_report()\n assert mock_stdout.getvalue() == '''Donor Name | Total Given |Num Gifts | Aver\n------------------------------------------------------------------------------------------\nJohn $ 255.5 3 $ 85.17\nJeff $ 520 2 $ 260.0\nSusan $ 1090 3 $ 363.3\nRob $ 270 2 $ 135.0\nRoss $ 200 1 $ 200.0\\n'''\n\n\n@unittest.mock.patch('mailroom_v4.send_letters_all')\ndef test_send_letters_all_call(mock_send_letters_all_call):\n test_send_donors_data = {\"testname3\": [200, 20, 35.5],\n \"testname2\": [500, 20],\n \"Susan\": [1000, 20, 70],\n \"Rob\": [250, 20],\n }\n mailroom_v4.send_letters_all(**test_send_donors_data)\n assert mock_send_letters_all_call.called\n #compare(dir.read('Susan.txt'), b'some thing')\n\n\ntest_send_letters_all_call()\n\n\n# @unittest.mock.patch('mailroom_v4.send_letters_all')\n# def test_send_letters_all(mock_test_send_letters_all_call):\n# test_send_donors_data = {\"testname3\": [200, 20, 35.5],\n# \"testname2\": [500, 20],\n# \"Susan\": [1000, 20, 70],\n# \"Rob\": [250, 20],\n# }\n# print(mock_test_send_letters_all_call(**test_send_donors_data))\n# print(mailroom_v4.send_letters_all(**test_send_donors_data))\n#\n# assert os.path.isfile(\"testname3.txt\") == 1\n\n\n\n#test_send_letters_all\n", "sub_path": "students/g_rama/lesson06/test_mailroom_v4.py", "file_name": "test_mailroom_v4.py", "file_ext": "py", "file_size_in_byte": 3661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mailroom_v4.thank_you_letter", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mailroom_v4.thank_you_letter", "line_number": 25, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "mailroom_v4.donor_details", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 36, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 36, "usage_type": "attribute"}, {"api_name": "mailroom_v4.amount_validate", "line_number": 43, "usage_type": "call"}, {"api_name": "mailroom_v4.amount_validate", "line_number": 48, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 46, "usage_type": "call"}, {"api_name": "unittest.mock", 
"line_number": 46, "usage_type": "attribute"}, {"api_name": "mailroom_v4.update_data_print_thanks", "line_number": 54, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 52, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mailroom_v4.create_report", "line_number": 60, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 58, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 58, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 58, "usage_type": "name"}, {"api_name": "mailroom_v4.send_letters_all", "line_number": 77, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 70, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "45340386", "text": "import unittest\nimport gzip\n\nfrom pkg_resources import resource_filename\nfrom testtools import TestCase\nfrom testtools.matchers import *\n\nfrom propertysuggester.test.parser.test_abstract_reader import AbstractUniverseTest\nfrom propertysuggester.parser import XmlReader\nfrom propertysuggester.utils.datamodel import Claim, Snak, Entity\n\nclass XmlReaderTest(AbstractUniverseTest):\n def test_universe(self):\n with gzip.open(resource_filename(__name__, \"Wikidata-Q1.xml.gz\"), \"r\") as f:\n result = list(XmlReader.read_xml(f))\n self.assert_universe(result)\n\n def test_updated_dump(self):\n with gzip.open(resource_filename(__name__, \"Wikidata-Q9351.xml.gz\"), \"r\") as f:\n result = list(XmlReader.read_xml(f))\n\n self.assertThat(len(result), Equals(1))\n q9351 = result[0]\n self.assertThat(q9351.title, Equals(\"Q9351\"))\n self.assertThat(q9351.claims, Contains(Claim(Snak(156, \"wikibase-item\", \"Q1647331\"))))\n self.assertThat(q9351.claims, Contains(Claim(Snak(1112, \"quantity\", \"+25\"))))\n\n def test_special_cases(self):\n self.assertThat(XmlReader._process_json((\"Q1\", \"{}\")), Equals(Entity(\"Q1\", [])))\n self.assertThat(XmlReader._process_json((\"Q1\", '{\"claims\":[{\"m\":[\"value\",\"\",\"bad\"], \"refs\":[],\"q\":[]}]}')),\n Equals(Entity(\"Q1\", [])))\n self.assertThat(XmlReader._process_json((\"Q1\", '{\"claims\":[{\"m\":[\"value\",\"\",\"unknown\"], \"refs\":[],\"q\":[]}]}')),\n Equals(Entity(\"Q1\", [])))\n\nclass MultiprocessingBigTest(TestCase):\n def test_simple_multiprocessing(self):\n r1 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-Q1.xml.gz\")), 1))\n r4 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-Q1.xml.gz\")), 4))\n\n self.assertThat(r1, HasLength(1))\n self.assertThat(r4, Equals(r1))\n\n def test_multiprocessing(self):\n r1 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-20131129161111.xml.gz\")), 1))\n r4 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-20131129161111.xml.gz\")), 4))\n\n self.assertThat(r1, HasLength(87))\n self.assertThat(r4, Equals(r1))\n\nif __name__ == '__main__':\n unittest.main()\n\n", "sub_path": "propertysuggester/test/parser/test_xml_reader.py", "file_name": "test_xml_reader.py", "file_ext": "py", "file_size_in_byte": 2293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "propertysuggester.test.parser.test_abstract_reader.AbstractUniverseTest", "line_number": 12, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 14, "usage_type": "call"}, {"api_name": 
"pkg_resources.resource_filename", "line_number": 14, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 15, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 15, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 19, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 19, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 20, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 20, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Claim", "line_number": 25, "usage_type": "call"}, {"api_name": "propertysuggester.utils.datamodel.Snak", "line_number": 25, "usage_type": "call"}, {"api_name": "propertysuggester.utils.datamodel.Claim", "line_number": 26, "usage_type": "call"}, {"api_name": "propertysuggester.utils.datamodel.Snak", "line_number": 26, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader._process_json", "line_number": 29, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 29, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Entity", "line_number": 29, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader._process_json", "line_number": 30, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 30, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Entity", "line_number": 31, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader._process_json", "line_number": 32, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 32, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Entity", "line_number": 33, "usage_type": "call"}, {"api_name": "testtools.TestCase", "line_number": 35, "usage_type": "name"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 37, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 37, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 37, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 37, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 38, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 38, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 38, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 38, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 44, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 44, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 44, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 44, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 45, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 45, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 45, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 45, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "610330026", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 
Wed Aug 19 11:33:08 2020\n\n@author: ramravi\n\"\"\"\n\n#importing the necessary libraries\n \nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom pandas import plotting\n\n#for visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.style.use('fivethirtyeight')\n\n#for interactive visualizations\nimport plotly.offline as py\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\nfrom plotly import tools\ninit_notebook_mode(connected=True)\nimport plotly.figure_factory as ff\n\n#importing the dataset\ndata= pd.read_csv('mallcustomersegmentation.csv')\n\ndat=ff.create_table(data.head())\n\npy.iplot(dat)\n\n\ndata.describe()\n\n#checking if there is null data\ndata.isnull().any().any()\n\n#plotting the andrews_curve\nplt.rcParams['figure.figsize']=(15,10)\n\nplotting.andrews_curves(data.drop('CustomerID', axis=1), 'Gender')\nplt.title('Andrew curves for gender', fontsize=20)\nplt.show()\n\n# the andrews curve preserves the means, distance(up to a constant) adn variances. \n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nplt.rcParams['figure.figsize']=(18,8)\n\nplt.subplot(1,2,1)\nsns.set(style='whitegrid')\nsns.distplot(data['Annual Income (k$)'])\nplt.title('annual income distribution', fontsize=20)\nplt.xlabel('Range of Annual Income')\nplt.ylabel('Count')\n\nplt.subplot(1,2,2)\nsns.set(style='whitegrid')\nsns.distplot(data['Age'], color='red')\nplt.title('Distribution of Age', fontsize=20)\nplt.xlabel('Range of age')\nplt.ylabel('count')\n\n# we can infer one thing that There are few people who earn more than 100 US Dollars. Most of the people have an earning of around 50-75 US Dollars. Also, we can say that the least Income is around 20 US Dollars.\n \n# Taking inferences about the Customers.\n\n# The most regular customers for the Mall has age around 30-35 years of age. Whereas the the senior citizens age group is the least frequent visitor in the Mall. Youngsters are lesser in umber as compared to the Middle aged people.\n\n\nlabels=['Female','Male']\nsize=data['Gender'].value_counts()\ncolors=['lightgreen', 'orange']\nexplode=[0,0.1]\n\nplt.rcParams['figure.figsize']=(9,9)\nplt.pie(size, explode=explode, labels=labels, autopct='%.2f%%', shadow=True)\nplt.title('Gender Pie distribution')\nplt.axis('off')\nplt.legend()\nplt.show()\n\n#if you can see the pie chart, it is clear that female gender leads the male count by atleast 56%\n# that is a huge gap specially when the population of Males is comparatively higher than females\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.countplot(data['Age'], palette = 'hsv')\nplt.title('Distribution of Age', fontsize = 20)\nplt.show()\n\n#This graph shows a more interactive chart about the distribution of each Age grou in the mall.\n#it is seen that the ages from 27 to 39 are very much frequent but there is no clear pattern. Interesting Fact, There are equal no. of Visitors in the Mall for the Agee 18 and 67. People of Age 55, 56, 69, 64 are very less frequent in the Malls. People at Age 32 are the Most Frequent Visitors in the Mall.\n\n\nplt.rcParams['figure.figsize']=(15,8)\nsns.countplot(data['Annual Income (k$)'], palette='hsv')\nplt.title('Distribution of Annual Income', fontsize=25)\nplt.show()\n\n#Interesting Fact, There are equal no. of Visitors in the Mall for the Agee 18 and 67. People of Age 55, 56, 69, 64 are very less frequent in the Malls. 
People at Age 32 are the Most Frequent Visitors in the Mall.\n\nplt.rcParams['figure.figsize']=(15,8)\nsns.countplot(data['Spending Score (1-100)'], palette='copper')\nplt.title('Distribution of Spending score', fontsize=25)\nplt.show()\n\n#this is the most important chart of all. \n#This shows that the mall has a variety of customers coming in since the chart here shows a spending score from 1 till 99. This shoes that the mall caters to the needs of different class of poeple. However, the most cutomers spending score lies between 35-60.\n\nsns.pairplot(data)\nplt.title('Paiplot for the data', fontsize=20)\nplt.show()\n\n# This shows the relationship between each feature variable with itself and with the other variables in the table. This helps in finding the hidden relationship between the chosen variable(target) and the other important features selected.\n\nplt.rcParams['figure.figsize']=(15,8)\nsns.heatmap(data.corr(), cmap='Wistia', annot=True)\nplt.title('Correlation matrix')\nplt.show()\n\n#If you can see the matrix, the features does not have any good correlation, thus proceeding with all the features.\n\n#Bi-Variate Analysis\n\nplt.rcParams['figure.figsize']=(15,8)\nsns.boxenplot('Gender','Spending Score (1-100)',data=data,palette='Blues')\nplt.title('Bi-Variate Analysis of gender and spending score')\nplt.show()\n\n\n#This shows the spending score of male is around 25k to 70k whearas the female gender has a spending score of 35k to 75k.This shows the clear domination of female gender in the shopping arena!\n\nplt.rcParams['figure.figsize']=(15,8)\nsns.boxplot('Gender', 'Annual Income (k$)', data=data, palette='rainbow')\nplt.title('Bivariate analysis Gender vs Annual Income', fontsize=20)\nplt.show()\n\n#This is that the male has higher average salary than the female gender, while if you compare lower income, both the gender is almost equal.\n\nx=data['Annual Income (k$)']\ny=data['Age']\nz=data['Spending Score (1-100)']\n\nsns.lineplot(x,y,color='blue')\nsns.lineplot(x,z,color='pink')\nplt.title('Multivariate anaysis of age vs annual income vs spending score')\nplt.show()\n\n#the above chart shows the relationship between age and annula income and also annual income and spending score.\n\n#Clustering analysis\nx=data.iloc[:,[3,4]].values\n\n\n#k means Algorithm\n\n#elbow method to find the number of optimum clusters\nfrom sklearn.cluster import KMeans\nwcss=[]\nfor i in range(1,11):\n km=KMeans(n_clusters=i, init='k-means++',max_iter=300,\n n_init=10, random_state=0)\n km.fit(x)\n wcss.append(km.inertia_)\n \nplt.plot(range(1,11), wcss)\nplt.title('The elbow method', fontsize=20)\nplt.xlabel('No of clusters')\nplt.ylabel('wcss')\nplt.show()\n\n#visualizing the clusters\nkm=KMeans(n_clusters=5, init='k-means++', max_iter=300,\n n_init=10, random_state=0)\ny_means=km.fit_predict(x)\n\nplt.scatter(x[y_means==0,0], x[y_means==0,1], \n s=100, c='pink', label='misser')\nplt.scatter(x[y_means==1,0], x[y_means==1,1], s=100, c='yellow',\n label='general')\nplt.scatter(x[y_means==2,0], x[y_means==2,1], s=100, c='cyan', \n label='target')\nplt.scatter(x[y_means==3,0], x[y_means==3,1], s=100, c='magenta',\n label='spendthrift')\nplt.scatter(x[y_means==4,0], x[y_means==4,1],s=100, c='orange',\n label='careful')\nplt.scatter(km.cluster_centers_[:,0],km.cluster_centers_[:,1], s=50, c='blue', label='centeriod')\n\n\nplt.style.use('fivethirtyeight')\nplt.title('K means Clsutering', fontsize=20)\nplt.xlabel('Annaul Income')\nplt.ylabel('Spending 
score')\nplt.legend()\nplt.grid()\nplt.show()\n\n\n#there are five segments in the mall and the label explains them in breifly.The mall authorities have to take care of the careul categories to avail some benefits so that they move to the general category.\n\n\n#Hierarchial Clustering\n\n#using dendograms\n\nimport scipy.cluster.hierarchy as sch\ndendogram=sch.dendrogram(sch.linkage(x, method='ward'))\nplt.title('dendogram',fontsize=20)\nplt.xlabel('customers')\nplt.ylabel('Ecuclidian Distance')\nplt.show()\n\n\n\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nhc=AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')\ny_hc=hc.fit_predict(x)\n\nplt.scatter(x[y_hc==0,0], x[y_hc==0,1], s=100, c='pink', label='misser')\nplt.scatter(x[y_hc==1,0], x[y_hc==1,1], s=100, c='yellow', label='general')\nplt.scatter(x[y_hc==2,0], x[y_hc==2,1], s=100, c='orange', label='target')\nplt.scatter(x[y_hc==3,0], x[y_hc==3,1], s=100, c='magenta', label='spendthrift')\nplt.scatter(x[y_hc==4,0], x[y_hc==4,1], s=100, c='cyan', label='careful')\nplt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], s=100, c='blue',label='centroid')\n\nplt.style.use('fivethirtyeight')\nplt.title('Cluster analysis-hierarchial Clustering', fontsize=20)\nplt.xlabel('Annual income')\nplt.ylabel('spending score (1-100)')\nplt.legend()\nplt.grid()\nplt.show()\n\n#age and spending score:\n \n \nx= data.iloc[:,[2,4]].values\n\nwcss=[]\nfor i in range(1,11):\n km=KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)\n km.fit(x)\n wcss.append(km.inertia_)\n \nplt.plot(range(1,11),wcss)\nplt.title('The elbow method', fontsize=20)\nplt.xlabel('No of clusters')\nplt.ylabel('wcss')\nplt.show()\n\nkm=KMeans(n_clusters=4, init='k-means++', max_iter=300,\n n_init=10, random_state=0)\ny_means=km.fit_predict(x)\n\nplt.scatter(x[y_means==0,0], x[y_means==0,1], \n s=100, c='pink', label='target customer')\nplt.scatter(x[y_means==1,0], x[y_means==1,1], s=100, c='yellow',\n label='priority')\nplt.scatter(x[y_means==2,0], x[y_means==2,1], s=100, c='cyan', \n label='usual customer')\nplt.scatter(x[y_means==3,0], x[y_means==3,1], s=100, c='magenta',\n label='target old customer')\nplt.scatter(km.cluster_centers_[:,0],km.cluster_centers_[:,1], s=50, c='blue', label='centeriod')\n\n\nplt.style.use('fivethirtyeight')\nplt.title('K means Clustering', fontsize=20)\nplt.xlabel('Age')\nplt.ylabel('Spending score')\nplt.legend()\nplt.grid()\nplt.show()\n\n#the age and spending score by looking at the above chart, we have the usual customer spread over all ages. 
And we also have the target customers with young and old ages. Then, after getting the results, we can make different marketing strategies and policies to optimize the spending scores of the customers in the Mall.\n\nx=data[['Age','Spending Score (1-100)', 'Annual Income (k$)']].values\nkm=KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)\nkm.fit(x)\nlabels=km.labels_\ncentroids=km.cluster_centers_\n\ndata['labels']= labels\ntrace1= go.Scatter3d(\n x= data['Age'],\n y=data['Spending Score (1-100)'],\n z=data['Annual Income (k$)'],\n mode='markers',\n marker=dict(\n color=data['labels'],\n size=10,\n line=dict(\n color=data['labels'],\n width=12\n ),\n opacity=0.8\n )\n )\ndf=[trace1]\n\nlayout= go.Layout(\n title='Age vs Spending Score vs Annual Income',\n margin=dict(\n l=0,\n r=0,\n b=0,\n t=0\n ),\n scene=dict(\n xaxis=dict(title='Age'),\n yaxis=dict(title='Spending Score'),\n zaxis=dict(title='Annual Income')\n )\n )\nfig=go.Figure(data=df, layout=layout)\npy.offline.plot(fig)\n\n# This is a multivariate analysis of age vs annual income vs spending score.\n", "sub_path": "mallcustomersegmentation.py", "file_name": "mallcustomersegmentation.py", "file_ext": "py", "file_size_in_byte": 10827, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "plotly.offline.init_notebook_mode", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "plotly.figure_factory.create_table", "line_number": 31, "usage_type": "call"}, {"api_name": "plotly.figure_factory", "line_number": 31, "usage_type": "name"}, {"api_name": "plotly.offline.iplot", "line_number": 33, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "pandas.plotting.andrews_curves", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.plotting", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 53, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 56, "usage_type": "call"}, {"api_name": "seaborn.distplot", 
"line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 63, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 81, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 91, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 100, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 107, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": 
"seaborn.countplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "seaborn.pairplot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 121, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 130, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "seaborn.boxenplot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 138, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 149, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 
174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 195, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.dendrogram", "line_number": 212, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy", "line_number": 212, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.linkage", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "sklearn.cluster.AgglomerativeClustering", "line_number": 223, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.scatter", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 233, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 266, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 273, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 284, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter3d", "line_number": 290, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 290, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 307, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 307, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 321, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 321, "usage_type": "name"}, {"api_name": "plotly.offline.offline.plot", "line_number": 322, "usage_type": "call"}, {"api_name": "plotly.offline.offline", "line_number": 322, "usage_type": "attribute"}, {"api_name": "plotly.offline", "line_number": 322, "usage_type": "name"}]} +{"seq_id": "528547404", "text": "# -*- coding: utf-8 - Python 3.5 *-\n\"\"\"\nDescription: Reads Adcirc Global Output file fort.63 & returns time series at\nselected nodes.\nInput(s): fort.63, Nodes of interest\nOutput(s): Time series .txt files\njdorvinen@dewberry.com, slawler@dewberry.com\nCreated on Tue Apr 19 15:08:33 2016\n\"\"\"\n#---------------------------------------Load Python Modules---------------------------------------#\n#import fileinput\nfrom datetime import datetime as dt\nfrom copy import deepcopy\nimport os\nfrom NODES_LIST import NODES_LIST\nfrom TRANSECTS import TRANSECTS\nimport numpy as np\n\n#------------------------------------------User Inputs--------------------------------------------#\nPARENT_DIR = \"P:/02/LakeOntario/Storm/\"\nINPUTFILES = [\"fort.63\", \"swan_TP.63\", \"swan_HS.63\"]\nSTORM_LIST = [\"19740314\", \"19770107\", \"19800109\", \"20061026\", \"19710301\"]\nPARAMETERS = {\"fort.63\":\"SWEL\", \"swan_TP.63\":\"TPS\", \"swan_HS.63\":\"HS\"}\n\n#------------------------------------------BEGIN SCRIPT-------------------------------------------#\n\ndef 
extract(root):\n\n \"\"\"Extracts data from ADCIRC time series files\"\"\"\n nodes_list = deepcopy(NODES_LIST)\n for filed in INPUTFILES:\n print(\"Extracting \"+root+\"/\"+filed)\n f63 = os.path.join(root, filed) #-- 63 files\n with open(f63) as fin:\n for line in fin:\n mynode = line.strip().split(' ')[0] #--Test each line\n if mynode in nodes_list.keys():\n value = line.strip().split()[1]\n nodes_list[mynode][PARAMETERS[filed]].append(value)\n return nodes_list\n\ndef write_data(root, nodes_list):\n \"\"\" Write extracted data to files \"\"\"\n for transect in TRANSECTS:\n for node in TRANSECTS[transect]:\n filename = \"transect_{0}_node_{1}.txt\".format(transect,\n node)\n length = max([len(nodes_list[node]['SWEL']),\n len(nodes_list[node]['HS']),\n len(nodes_list[node]['TPS'])])\n timesteps = np.arange(0, length)\n with open(os.path.join(root, filename), 'w') as savefile:\n for step in timesteps:\n time = '{:>12}'.format(str((step)*1800))\n if step == 0:\n swel = '{:>24}'.format('nan')\n else:\n try:\n swel = '{:>24}'.format(nodes_list[node]['SWEL'][step-1])\n except LookupError:\n swel = '{:>24}'.format('nan')\n try:\n hsig = '{:>24}'.format(nodes_list[node]['HS'][step])\n except LookupError:\n hsig = '{:>24}'.format('nan')\n try:\n tps = '{:>24}'.format(nodes_list[node]['TPS'][step])\n except LookupError:\n tps = '{:>24}'.format('nan')\n line = time+swel+hsig+tps+\"\\n\"\n savefile.write(line)\n\n#------------------------------------------MAIN FUNCTION------------------------------------------#\ndef main():\n\n \"\"\"Main function, runs extract() funtion and times it.\"\"\"\n\n start_time = dt.now()\n print(\"\\n==========START========= \\n\")\n print('Begin extracting data:\\n')\n print(start_time)\n\n for storm in STORM_LIST:\n root = os.path.join(PARENT_DIR, storm)\n nodes_list = extract(root)\n write_data(root, nodes_list)\n\n end_time = dt.now()\n tda = str(end_time-start_time).split('.')[0].split(':')\n print(\"\\n===========END==========\\n\")\n print(\"Processing Time :\\n\")\n print(\"{0} hrs, {1} mins, {2} sec \\n\\n\".format(tda[0], tda[1], tda[2]))\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "lake_ontario/extract_from_63_list_09022016_offset_3in1.py", "file_name": "extract_from_63_list_09022016_offset_3in1.py", "file_ext": "py", "file_size_in_byte": 3796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "copy.deepcopy", "line_number": 30, "usage_type": "call"}, {"api_name": "NODES_LIST.NODES_LIST", "line_number": 30, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "TRANSECTS.TRANSECTS", "line_number": 44, "usage_type": "name"}, {"api_name": "TRANSECTS.TRANSECTS", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": 
"name"}]} +{"seq_id": "471922606", "text": "\"\"\"EK80 data client\n\nAuthor: Terje Nilsen, Kongsberg Maritime AS\n\"\"\"\n\nimport socket\nimport time\nimport datetime\nimport binascii\nimport threading\nimport sys\nimport requests\n\nimport xmltodict\nimport xml.etree.ElementTree as ET\nfrom struct import *\nfrom collections import namedtuple\nfrom pprint import pprint\n\n# comment out the line below to disable debug-level messages\nlogging.basicConfig(level=logging.DEBUG)\n\n# PS: Enabling debug output might in som cases delay the handling and cause errors.\n# If you start to get lost messages, disable debug and retest.\n# If this helps, then remove some output messages in the EK80_data function.\n# The EK80_data function is time critical...\n\ndef bytes_to_int(bs):\n \"\"\"Convert a byte string to int(16)\n \"\"\"\n return int(bs[0]) + int(bs[1]*256)\n\nclass t9ek80:\n \"\"\"\n \"\"\"\n#----------------------------------------------------------------------------\n# Method report\n# Description User defined REPORT function, this is to be adapter to individual needs.\n# It receives a list for parameters and meta data to process...\n#-----------------------------------------------------------------------------\n# For motion simulation only, to be removed...\n def __init__(self, argv):\n\n self.error = 0 # Class Error handler...\n\n # Data that will be read from the xml file...\n # PS: These walues will be overwritten...\n self.UDP_IP = \"127.0.0.1\"\n self.UDP_PORT = 37655\n self.UDP_DATA = 0\n self.desimate = 0\n\n self.NMEA_DATA = 0 # Will be set by the XML handler...\n\n # KDI_TCP_IP = \"127.0.0.1\"\n # KDI_TCP_PORT = 55035\n # USEKOGNIFAI = 0 #True\n\n self.Status_Command = 1\n self.Status_Data = 2\n self.Status_NMEA = 4\n self.Status_Done = 8\n self.Status_Running = 16\n\n # globale variable\n self.client_seq_no = 1\n self.mtypeName = \"\"\n self.itypeVal = \"\"\n self.itypeSize = 0\n self.EK_req = \"\"\n self.EK_Value = 0\n self.EK_Type = \"\"\n self.desimated = 0\n self.finale_data = b\"\"\n self.mtype = \"\"\n self.running = 0 # 0x1FF when all prosesses running...\n self.totalbytes = 0\n\n self.config = \"config.xml\"\n self.busy = 0\n self.mode = -1\n self.cont = False\n\n self.debug = self.getDebug()\n\n #FIXME: the code below should be moved to a load_config() method\n\n # Get extra parameters...\n if len(argv) == 3:\n self.mode = int(argv[2])\n\n # count the arguments\n if len(argv) < 2:\n print(\"Usage: python3 tescast.py config.xml [transponder]\")\n self.error = -1\n else:\n print(\"Initializes config file: \"+argv[1])\n arguments = len(argv)\n if arguments >= 2:\n config = argv[1]\n\n # Open the default channel...\n tree = ET.parse(config)\n root = tree.getroot()\n\n for table in root.iter('Configuration'):\n for child in table:\n if child.tag == 'EK80':\n for child2 in child:\n if child2.tag == 'EK80_IP':\n self.UDP_IP = child2.text\n if child2.tag == 'EK80_PORT':\n self.UDP_PORT = int(child2.text)\n if child2.tag == 'EK80_DATA':\n self.UDP_DATA = int(child2.text)\n if child2.tag == 'NMEA_DATA':\n self.NMEA_DATA = int(child2.text)\n if child2.tag == 'DESIMATE':\n self.desimate = int(child2.text)\n\n # if child.tag == 'Cloud':\n # for child2 in child:\n # if child2.tag == 'KDI_TCP_IP':\n # self.KDI_TCP_IP = child2.text\n # if child2.tag == 'KDI_TCP_PORT':\n # self.KDI_TCP_PORT = int(child2.text)\n # if child2.tag == 'USEKOGNIFAI':\n # self.USEKOGNIFAI = int(child2.text)\n\n if child.tag == 'Request':\n for child2 in child:\n if child2.tag == 'req':\n self.EK_req = 
child2.text\n if child2.tag == 'req2':\n self.EK_Value = child2.text\n if child2.tag == 'req3':\n self.EK_Type = child2.text\n if child2.tag == 'res':\n self.mtypeName = child2.text\n if child2.tag == 'resi':\n self.itypeVal = child2.text\n if child2.tag == 'ress':\n self.itypeSize = int(child2.text)\n if child2.tag == 'type':\n self.mtype = child2.text\n #----------------------------------------------------------------------------\n # Can be overide in local file...\n def getDebug(self):\n \"\"\"\n \"\"\"\n return False\n\n # Do the reporting stuff...\n def report(Payload, Decode, timenow, mtype, decimate):\n \"\"\"Incoming data handler (to be overridden by user in derived class)\n \"\"\"\n logging.warning(\"Missing interface module...\")\n\n\n def NMEAdecode(self, data):\n \"\"\"NMEA data handler (to be overridden by user in derived class)\n \"\"\"\n logging.warning(\"Missing NMEA interface module...\")\n\n\n # ----------------------------------------------------------------------------\n # Method Prepare subscription...\n # Description Adds the JSON subscription to the EK80 subscriptor.\n # Can create og change a subscription...\n #-----------------------------------------------------------------------------\n def subscribe(self, sock, ApplicationID, transponder, create):\n \"\"\"Create or change a data subscription\n \"\"\"\n self.EK_req = self.EK_req.replace(\"?\", transponder)\n logging.debug(self.EK_req)\n\n if create == True:\n self.CreateSubscription(sock, ApplicationID, self.UDP_DATA, self.EK_req)\n else:\n self.ChangeSubscription(sock, ApplicationID, self.UDP_DATA, self.EK_req)\n\n def GetParameterValue(self, sock, ApplicationID, transponder, parameter_name):\n \"\"\"\n Retrieve a parameter value\n \"\"\"\n parameter_name = parameter_name.replace(\"?\", transponder)\n\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n \"ParameterServer\" \\\n \"\" \\\n \"\" \\\n \"{:s}\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no, parameter_name)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request, encoding='utf-8')\n sock.send(request)\n self.client_seq_no = self.client_seq_no + 1\n\n def SetParameter(self, sock, ApplicationID, transponder, parameter_name, parameter_value, parameter_type):\n \"\"\"\n Set a parameter value\n \"\"\"\n parameter_name = parameter_name.replace(\"?\", transponder)\n\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n \"ParameterServer\" \\\n \"\" \\\n \"\" \\\n \"{:s}\" \\\n \"{:s}\" \\\n \"{:s}\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no, parameter_name, parameter_value, parameter_type)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request, encoding='utf-8')\n sock.send(request)\n self.client_seq_no = self.client_seq_no + 1\n\n def CreateSubscription(self, sock, ApplicationID, port, parameter_name):\n \"\"\"\n Create a data subscription\n \"\"\"\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n 
\"invokeMethod\" \\\n \"RemoteDataServer\" \\\n \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:s}\" \\\n \"\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no, port, parameter_name)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request, encoding='utf-8')\n sock.send(request)\n self.client_seq_no = self.client_seq_no + 1\n\n #----------------------------------------------------------------------------\n # Method ChangeSubscription\n # Description Changes an existing subscription to EK80...\n #-----------------------------------------------------------------------------\n def ChangeSubscription(self, sock, ApplicationID, port, parameter_name):\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n \"RemoteDataServer\" \\\n \"\" \\\n \">\" \\\n \"{:d}\" \\\n \"{:s}\" \\\n \">\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no,ApplicationID,parameter_name)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request,encoding='utf-8')\n sock.send(request)\n self.client_seq_no=self.client_seq_no + 1\n\n #----------------------------------------------------------------------------\n # Method EK80_comunicate\n # Description Initiate a communication and data channel to the EK80...\n #-----------------------------------------------------------------------------\n def EK80_comunicate(self, port, data):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((self.UDP_IP, port))\n sock.settimeout(5.0)\n self.running = self.running |self. Status_Command\n\n while self.running & self.Status_Running:\n\n if len(data) >= 3:\n if data[:3] == b'SI2':\n msg = bytearray(b'CON\\0Name:Simrad;Password:\\0')\n sock.send(msg) # Send connect...\n\n elif data[:3] == b'RES':\n if data[4:7] == b'CON':\n if data[30:45] == b'ResultCode:S_OK':\n logging.debug(\"Connected\")\n\n data2 = data[46:].replace(b'AccessLevel:',b' ')\n data2 = data2.replace(b'ClientID:',b' ')\n data2 = data2.replace(b'}',b' ')\n data2 = data2.replace(b',',b' ')\n data3 = data2.split()\n ApplicationID = int(data3[1].decode())\n logging.debug(\"Get Param\")\n self.GetParameterValue(sock,ApplicationID, \"\", \"TransceiverMgr/Channels\" )\n\n else: # If failed the retry...\n logging.warning(\"Connection failed!\")\n msg = bytearray(b'CON\\0Name:Simrad;Password:\\0')\n sock.send(msg) # Send connect...\n\n elif data[4:7] == b'REQ':\n logging.debug('RES REQ received...')\n msg = data[30:].decode(\"utf-8\").rstrip(\"\\0\")\n root = ET.fromstring(msg)\n\n element = \"\"\n for table in root.iter('GetParameterResponse'):\n for child in table:\n for child2 in child:\n if child2.tag == 'value':\n element = child2.text.split(',')\n\n if len(element) > 0:\n if self.mode == -1: # If we already got a mode from command line parameter...\n print('\\n\\rTransponder to use:')\n i = 0\n for e in element:\n print('{:d}: '.format(i) + e)\n i = i+1\n\n # If there are only one head, then select it, no question...\n if len(element) == 1: # If there is only one option...\n self.mode = 0\n else: # Else let the user select...\n self.mode = -1\n while self.mode < 0 or self.mode > len(element):\n try:\n self.mode=int(input('Select Transponder: '))\n except ValueError:\n print (\"Not a number\")\n else:\n print('{:d}: '.format(self.mode) + element[self.mode])\n\n transponder = 
element[self.mode]\n #print(self.mtype)\n\n if self.mtype == \"Set_Param\":\n self.SetParameter(sock, ApplicationID, transponder, self.EK_req, self.EK_Value, self.EK_Type)\n self.running = self.running | self.Status_Done\n break\n else:\n self.subscribe(sock, ApplicationID, transponder, True)\n else:\n logging.debug(\"Received status\")\n if self.mtype == \"Set_Param\":\n self.cont = True\n self.running = self.running | self.Status_Command\n else:\n logging.warning(\"Received unknown response\")\n\n elif data[:3] == b'ALI':\n msg = 'ALI\\0ClientID:{:d},SeqNo:{:d}\\0'.format(ApplicationID, self.client_seq_no)\n msg = bytes(msg, encoding='utf-8')\n sock.send(msg) # Send connect...\n # logging.debug('.')\n elif data[:3] == b'RTR':\n logging.debug(\"RTR received: %s\", data)\n elif data[:3] == b'REQ':\n logging.debug(\"REQ received\")\n elif data[:3] == b'PRD':\n logging.debug(\"PRD received: {}\".format(data))\n else:\n logging.debug(\"Wrong data\")\n else:\n logging.error(\"EK80 error...\")\n\n\n try:\n data = sock.recv(20000)\n except socket.timeout:\n continue\n\n logging.debug(\"Closing command handler\")\n\n self.running = self.running & ~self.Status_Command\n msg = bytearray(b'DIS\\0Name:Simrad;Password:\\0')\n sock.send(msg) # Send connect...\n\n sock.settimeout(None)\n sock.close()\n\n #----------------------------------------------------------------------------\n # Method EK80_data\n # Description The subscription data handler...\n # Data is parsed according to the XML file...\n #-----------------------------------------------------------------------------\n\n def EK80_data(self,a,b):\n time = 0\n\n # Open the default channel...\n logging.debug(\"Setting up data channel\")\n\n datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n datasock.bind((\"0.0.0.0\", self.UDP_DATA))\n self.UDP_DATA = datasock.getsockname()[1]\n datasock.settimeout(5.0)\n logging.info('EK80data listening on port: %d', self.UDP_DATA)\n self.running = self.running | self.Status_Data\n\n # Data can in some case be received in frame sets, we then need to make sure that we start with the first frame in the set.\n # Some time we are a bit slow hence the Busy structure...\n while self.running & self.Status_Running:\n try:\n data = datasock.recv(50000)\n except socket.timeout:\n continue\n\n Decode = unpack('<4siiHHH',data[0:18])\n\n if self.busy == 110 and Decode[4] == Decode[3]:\n self.busy = 2 # Ready from next...\n else:\n self.finale_data = self.finale_data+data[18:]\n self.totalbytes = self.totalbytes + Decode[5]\n\n if Decode[4] == Decode[3]:\n self.busy = 1 #Busy...\n\n if self.debug == True:\n print(\"\\n\\rHeader: \".format(Decode[0].decode('utf-8')))\n print(Decode[0])\n print(\"SeqNo: {:d}\".format(Decode[1]))\n print(\"SubID: {:d}\".format(Decode[2]))\n print(\"CurrentMsg: {:d}\".format(Decode[3]))\n print(\"TotalMsg: {:d}\".format(Decode[4]))\n print(\"NoOfBytes: {:d}\".format(Decode[5]))\n\n if self.itypeSize > 0:\n tmp = unpack(\" 0:\n for loop in range(0,tmp[1]):\n start = loop*self.itypeSize\n end = (loop*self.itypeSize)+self.itypeSize\n dta = self.finale_data[start:end]\n Payload.append(unpack(\"<\"+self.itypeVal,self.finale_data[start:end]))\n\n if self.debug == 2:\n for element in Payload:\n for elements in element:\n print(\"Value: {:f}\".format(elements))\n\n else:\n Payload = unpack(\" 0:\n datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n datasock.bind((\"0.0.0.0\", int(self.NMEA_DATA)))\n self.NMEA_DATA = datasock.getsockname()[1]\n datasock.settimeout(5.0)\n print('NMEA 
listening on port:', self.NMEA_DATA)\n data = b\"\"\n self.running = self.running | self.Status_NMEA\n\n while self.running & self.Status_Running:\n try:\n data = datasock.recv(20000)\n except socket.timeout:\n continue\n\n self.NMEAdecode(data)\n # maybe add a sleep here?\n\n logging.debug(\"NMEA closed\")\n\n datasock.settimeout(None)\n datasock.close()\n\n else:\n logging.debug(\"NMEA not used\")\n\n self.running = self.running & ~self.Status_NMEA\n\n #----------------------------------------------------------------------------\n # Method man function, entry point\n # Description Parse the XML and get started...\n #-----------------------------------------------------------------------------\n def main(self):\n \"\"\"\n \"\"\"\n # Request an port number from the EK80 to use for future communications.\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((self.UDP_IP, self.UDP_PORT))\n sock.settimeout(5.0)\n sock.send('RSI\\0'.encode()) # Send reset...\n try:\n data = sock.recv(8000)\n except socket.timeout:\n print (\"No Equipment found, make shure the IP:port is set to: {:s}:{:d}\".format(self.UDP_IP, self.UDP_PORT))\n sock.close()\n return\n\n # Print status so far....\n print('Unit: ', data[4:8])\n print('ID: ', data[272:283])\n port = bytes_to_int(data[264:266])\n\n # Close and reopen a new channel...\n sock.settimeout(None)\n sock.close()\n\n #----------------------------------------------------------------------------\n # Start comunication...\n if len(data) > 3:\n\n self.running = self.Status_Running # Start running...\n\n logging.debug(\"Starting NMEA thread\")\n thread3 = threading.Thread(target=self.NMEA_data, args=(0, 0))\n thread3.start()\n\n logging.debug(\"Start Data thread\")\n thread2 = threading.Thread(target=self.EK80_data, args=(0, 0))\n thread2.start()\n\n logging.debug(\"Awaiting Data handler ready...\")\n\n while (self.running & self.Status_Data) == 0:\n time.sleep(1)\n if thread2.isAlive() == 0:\n break\n\n # If the data thread is running (should always be...)\n if self.running & self.Status_Data:\n logging.debug(\"Starting command thread\")\n\n thread1 = threading.Thread(target = self.EK80_comunicate, args = (port, data))\n thread1.start()\n\n while (self.running & self.Status_Command) == 0 and (self.running & self.Status_Done) == 0:\n time.sleep(1)\n if thread1.isAlive() == 0:\n break\n\n # Do data handle until enter i pressed...\n if self.running & self.Status_Command and self.cont == False:\n input('Enter to exit...')\n\n # Exit gracefully...\n logging.debug(\"Stopping\")\n\n time.sleep(4)\n self.running = self.running & ~self.Status_Running\n while self.running & ~self.Status_Done:\n time.sleep(1)\n\n time.sleep(2)\n", "sub_path": "t9ek80/t9ek80/t9ek80.py", "file_name": "t9ek80.py", "file_ext": "py", "file_size_in_byte": 24765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 103, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 103, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 302, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 302, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 302, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 336, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 336, "usage_type": "name"}, {"api_name": "socket.timeout", 
"line_number": 402, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 426, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 426, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 426, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 438, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 463, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 463, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 481, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 481, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 512, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 512, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 512, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 523, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 547, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 547, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 547, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 553, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 574, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 578, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 584, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 592, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 596, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 607, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 610, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 612, "usage_type": "call"}]} +{"seq_id": "523771320", "text": "import numpy as np\nimport os, csv\nimport cPickle as pickle\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nimport datetime as dt\n\nauthors = {}\n\nbasedir = '/home/cwp/EMC/lib/analysis/zhr/'\n\n# Get directories\ncwd = '/home/cwp/EMC/data/authors/'\nfilenames = os.listdir(cwd)\n\nfor filename in filenames:\n with open(cwd+filename, 'rb') as input:\n authors[filename[:-4]] = pickle.load(input)\n\ndef getObservers(filepath):\n base = filepath\n toReturn = {}\n\n for filename in os.listdir(filepath):\n toReturn[filename[:-4]] = []\n\n with open(base + filename, 'r') as f:\n for line in f.readlines():\n toReturn[filename[:-4]].append(line.split('\\n')[0])\n\n os.remove(base+filename)\n\n return toReturn\n\ndef getShowerInfo(filepath):\n data = {}\n with open(basedir+filepath, 'r') as f:\n readFile = list(csv.reader(f))\n for line in readFile:\n data[int(line[0])] = {'ra':float(line[1]),\\\n 'dec':float(line[2]), 'peak':line[3], 'start':line[4], \\\n 'end':line[5], 'r':float(line[6]), 'zhr_exp':int(line[7]),\\\n 'zhr_max':int(line[8])}\n\n return data\n\ndef getDateRange(start,end,startYear,endYear):\n startDate = datetime(startYear, int(start.split('/')[1]), \\\n int(start.split('/')[0]))\n\n endDate = datetime(endYear, int(end.split('/')[1]), \\\n int(end.split('/')[0]))\n\n dates = []\n\n while startDate <= endDate:\n dates.append(startDate)\n startDate += dt.timedelta(days=1)\n\n return dates\n\nshowers = ['perseids', 'leonids', 'quadrantids', 'geminids', 'orionids', 'eta_aquariids']\ndates = 
['2005','2006','2007','2010','2011','2012','2013','2014','2015','2016']\n\nshowerObservers = {}\nshowerObserversFinal = {}\n\nfor shower in showers:\n showerObservers[shower] = getObservers(basedir+'dates/'+shower+'/')\n\nfor shower, observers in showerObservers.items():\n print('========'+shower+'========')\n\n showerData = getShowerInfo(shower+'radiant.txt')\n\n count = 0\n\n for observer in observers:\n #print(observer)\n okayDates = []\n\n for date in observers[observer]:\n noNaN = True\n noPeakNaN = True\n year = int(date[:4])\n\n if year in showerData.keys():\n peakDate = datetime(year,int(\\\n showerData[year]['peak'].split('/')[1]), int(\\\n showerData[year]['peak'].split('/')[0]))\n\n if shower != 'quadrantids':\n activeRange = getDateRange(showerData[year]['start'],\\\n showerData[year]['end'], year, year)\n else:\n activeRange = getDateRange(showerData[year]['start'],\\\n showerData[year]['end'], year, year+1)\n\n entry = authors[observer].data[date]\n entry.loadData()\n\n for day, dayData in entry.data.items():\n try:\n currentDate = datetime(int(date.split('-')[0]), \\\n int(date.split('-')[1]), int(day))\n if currentDate in activeRange:\n dayData = dayData[:-1]\n\n for hour in dayData:\n if hour == '-1':\n noNaN += 1\n if (currentDate == peakDate) and (hour == '-1'):\n noPeakNaN = False\n\n except:\n pass\n\n if (noNaN < 12) and noPeakNaN:\n okayDates.append(date)\n\n if len(okayDates) != 0:\n finalDates = []\n if shower == 'quadrantids':\n for aDate in okayDates:\n if aDate[-2:] == '01':\n if str(int(aDate[:-3])-1)+'-12' in okayDates:\n finalDates.append(str(int(aDate[:-3])-1)+'-12')\n finalDates.append(aDate)\n\n if shower == 'geminids':\n for aDate in okayDates:\n finalDates.append(aDate)\n\n if shower == 'leonids':\n for aDate in okayDates:\n finalDates.append(aDate)\n\n if shower == 'orionids':\n for aDate in okayDates:\n if aDate[-2:] == '10':\n if aDate[:-2]+'11' in okayDates:\n finalDates.append(aDate)\n finalDates.append(aDate[:-2]+'11')\n\n if shower == 'perseids':\n for aDate in okayDates:\n if aDate[-2:] == '07':\n if aDate[:-2]+'08' in okayDates:\n finalDates.append(aDate)\n finalDates.append(aDate[:-2]+'08')\n\n if shower == 'eta_aquariids':\n for aDate in okayDates:\n if aDate[-2:] == '04':\n if aDate[:-2]+'05' in okayDates:\n finalDates.append(aDate)\n finalDates.append(aDate[:-2]+'05')\n\n if len(finalDates) != 0:\n count += 1\n\n with open(basedir+'dates/'+shower+'/'+observer+'.txt', 'w') as f:\n for date in finalDates:\n f.write(date)\n f.write('\\n')\n print(count)\n", "sub_path": "zhr/refineAuthors.py", "file_name": "refineAuthors.py", "file_ext": "py", "file_size_in_byte": 5491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 31, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "384900255", "text": "# coding: 
utf8\nfrom coll_avoidance_modules.solo_coll_wrapper_c import *\nfrom coll_avoidance_modules.collisions_controller import *\nfrom coll_avoidance_modules.collisionsViewerClient import *\n\nfrom utils.logger import Logger\nfrom pynput import keyboard\n\nimport numpy as np\nimport argparse\nimport math\nfrom time import clock, sleep\nfrom solo12 import Solo12\n\n\ndef compute_pd(q_desired, v_desired, KP, KD, device):\n\tpos_error = q_desired - device.q_mes\n\tvel_error = v_desired - device.v_mes\n\ttau = KP * pos_error + KD * vel_error #+ KT * tau_desired\n\t#tau = np.maximum(np.minimum(tau, tau_max), -tau_max) \n\treturn tau\n\n\ndef on_press(key):\n\tglobal key_pressed\n\ttry:\n\t\tif key == keyboard.Key.enter:\n\t\t\tkey_pressed = True\n\t\t\t# Stop listener\n\t\t\treturn False\n\texcept AttributeError:\n\t\tprint('Unknown key {0} pressed'.format(key))\n\n\ndef put_on_the_floor(device, q_init):\n\tglobal key_pressed\n\tkey_pressed = False\n\tKp_pos = 3.\n\tKd_pos = 0.01\n\timax = 3.0\n\tpos = np.zeros(device.nb_motors)\n\tfor motor in range(device.nb_motors):\n\t\tpos[motor] = q_init[device.motorToUrdf[motor]] * device.gearRatioSigned[motor]\n\tlistener = keyboard.Listener(on_press=on_press)\n\tlistener.start()\n\tprint(\"Put the robot on the floor and press Enter\")\n\twhile not key_pressed:\n\t\tdevice.UpdateMeasurment()\n\t\tfor motor in range(device.nb_motors):\n\t\t\tref = Kp_pos*(pos[motor] - device.hardware.GetMotor(motor).GetPosition() - Kd_pos*device.hardware.GetMotor(motor).GetVelocity())\n\t\t\tref = min(imax, max(-imax, ref))\n\t\t\tdevice.hardware.GetMotor(motor).SetCurrentReference(ref)\n\t\tdevice.SendCommand(WaitEndOfCycle=True)\n\n\tprint(\"Start the motion.\")\n\n\ndef example_script(name_interface, legs_clib_path, shd_clib_path):\n device = Solo12(name_interface,dt=0.001)\n nb_motors = device.nb_motors\n LOGGING = False\n VIEWER = False\n \n qc = None\n if LOGGING:\n # Initialize logger\n qc = QualisysClient(ip=\"140.93.16.160\", body_id=0) # ??\n logger = Logger(device, qualisys=qc, logSize=50000)\n \n #### Set ref. traj. PD parameters\n ref_traj_KP = 0\n ref_traj_KV = 0\n active_dof = [0,1,2,3,4,5,6,7,8,9,10,11]\n\n #### Set collision avoidance parameters\n legs_threshold = 0.05\n legs_kp = 20.\n legs_kv = 0.0\n nb_legs_pairs = 20\n\n #### Shoulder collision parameters\n shd_threshold = 0.2\n shd_kp = 3.\n shd_kv = 0.\n\n #### Reference traj. 
parameters\n q_ref_list = '###.npy'\n dq_ref_list = '###.npy'\n\n traj_KP = 1*np.ones(12)\n traj_KP[:] = 0.\n traj_KV = 0*np.ones(12)\n\n q_init = q_ref_list[0][7:]\n traj_counter = 0\n\n ### Emergency behavior switches\n q_bounds = [-4,4]\n vq_max = 20.0\n tau_q_max = 1.0\n\n # Load the specified compiled C library\n cCollFun = CDLL(legs_clib_path)\n nnCCollFun = CDLL(shd_clib_path)\n # Initialize emergency behavior trigger var.\n emergencyFlag = 0\n\n # Initialize viewer\n if VIEWER:\n viewer_coll = viewerClient(nb_legs_pairs, 3, legs_threshold, shd_threshold, urdf=\"/home/ada/git/tnoel/solopython/coll_avoidance_modules/urdf/solo12_simplified.urdf\", modelPath=\"/home/ada/git/tnoel/solopython/coll_avoidance_modules/urdf\")\n\n device.Init(calibrateEncoders=True, q_init=q_init)\n\n put_on_the_floor(device, q_init)\n #CONTROL LOOP ***************************************************\n tau_q = np.zeros(nb_motors)\n tau_PD = np.zeros(nb_motors)\n while ((not device.hardware.IsTimeout()) and (clock() < 120) and emergencyFlag==0):\n device.UpdateMeasurment()\n\n tau_q[:] = 0.\n tau_PD[:] = 0.\n\n # Compute PD to follow reference traj.\n curr_q_ref = q_ref_list[traj_counter][7:]\n curr_dq_ref = dq_ref_list[traj_counter][6:]\n tau_PD = compute_pd(curr_q_ref, curr_dq_ref, traj_KP, traj_KV, device)\n\n traj_counter += 1\n\n # Compute collision distances and Jacobians from the C lib.\n c_results = getLegsCollisionsResults(device.q_mes, cCollFun, nb_motors, nb_legs_pairs, witnessPoints=True)\n c_dist_legs = getLegsDistances(c_results, nb_motors, nb_legs_pairs, witnessPoints=True)\n c_Jlegs = getLegsJacobians(c_results, nb_motors, nb_legs_pairs, witnessPoints=True)\n c_wPoints = getLegsWitnessPoints(c_results, nb_motors, nb_legs_pairs)\n \n ### Get results from C generated code (shoulder neural net)\n #c_shd_dist, c_shd_jac = getAllShouldersCollisionsResults(device.q_mes, nnCCollFun, 2, offset=0.08) # 2D neural net\n c_shd_dist, c_shd_jac = getAllShouldersCollisionsResults(device.q_mes, nnCCollFun, 3, offset=0.11) #offset with 3 inputs: 0.18 (small), 0.11 (large)\n\n # Compute collision avoidance torque\n tau_legs = computeRepulsiveTorque(device.q_mes, device.v_mes, c_dist_legs, c_Jlegs, legs_threshold, legs_kp, legs_kv, opposeJacIfNegDist=True)\n tau_shd = computeRepulsiveTorque(device.q_mes, device.v_mes, c_shd_dist, c_shd_jac, shd_threshold, shd_kp, shd_kv, opposeJacIfNegDist=False)\n\n tau_q = 1*tau_legs + 1*tau_shd\n\n # Set the computed torque as command\n tau_command = tau_q + tau_PD\n\n device.SetDesiredJointTorque(0*tau_command)\n # Check the condition for triggering emergency behavior\n emergencyFlag = max(emergencyFlag, emergencyCondition(device.q_mes, device.v_mes, tau_command, q_bounds, vq_max, tau_q_max))\n # Call logger\n if LOGGING:\n logger.sample(device, qualisys=qc)\n\n if VIEWER:\n viewer_coll.display(np.concatenate(([0,0,0,0,0,0,0],device.q_mes)), c_dist_legs, c_shd_dist, c_wPoints, tau_legs, tau_shd)\n\n device.SendCommand(WaitEndOfCycle=True)\n if ((device.cpt % 100) == 0):\n device.Print()\n print('Avoid. 
torque')\n print(tau_q)\n print('PD torque')\n print(tau_PD)\n\n\n #****************************************************************\n\n print(\"Emergency : {}\".format(emergencyFlag))\n\n # Whatever happened we send 0 torques to the motors.\n device.SetDesiredJointTorque([0]*nb_motors)\n device.SendCommand(WaitEndOfCycle=True)\n \n # Save the logs of the Logger object\n if LOGGING:\n logger.saveAll()\n print(\"Log saved\")\n \n if device.hardware.IsTimeout():\n print(\"Masterboard timeout detected.\")\n print(\"Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.\")\n device.hardware.Stop() # Shut down the interface between the computer and the master board\n \ndef main():\n parser = argparse.ArgumentParser(description='Example masterboard use in python.')\n parser.add_argument('-i',\n '--interface',\n required=True,\n help='Name of the interface (use ifconfig in a terminal), for instance \"enp1s0\"')\n\n parser.add_argument('-CL',\n '--cliblegs',\n required=True,\n help='Path to the compiled C-generated library used for distance and jacobian evaluations, for instance \"libcoll_legs8.so\"')\n\n parser.add_argument('-CS',\n '--clibshd',\n required=True,\n help='Path to the compiled C-generated library used for shoulder distance and jacobian evaluations, for instance \"libcoll_nn.so\"')\n\n example_script(parser.parse_args().interface, parser.parse_args().cliblegs, parser.parse_args().clibshd)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "main_solo12_collision_avoidance.py", "file_name": "main_solo12_collision_avoidance.py", "file_ext": "py", "file_size_in_byte": 7389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pynput.keyboard.Key", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "pynput.keyboard.Listener", "line_number": 44, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 44, "usage_type": "name"}, {"api_name": "solo12.Solo12", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.logger.Logger", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 158, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "320765239", "text": "import json\nimport pytest\nimport uuid\nfrom httpretty import httpretty\n\nfrom rasa_core import utils\nfrom rasa_core.training import online\nfrom rasa_core.utils import EndpointConfig\n\n\n@pytest.fixture\ndef mock_endpoint():\n return EndpointConfig(\"https://abc.defg\")\n\n\ndef test_send_message(mock_endpoint):\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/messages'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.POST, url, body='{}')\n\n httpretty.enable()\n online.send_message(mock_endpoint, sender_id, \"Hello\")\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n assert json.loads(b) == {\n \"sender\": \"user\",\n 
\"text\": \"Hello\",\n \"parse_data\": None\n }\n\n\ndef test_request_prediction(mock_endpoint):\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/predict'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.POST, url, body='{}')\n\n httpretty.enable()\n online.request_prediction(mock_endpoint, sender_id)\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n assert b == \"\"\n\n\ndef test_bot_output_format():\n message = {\n \"text\": \"Hello!\",\n \"data\": {\n \"image\": \"http://example.com/myimage.png\",\n \"attachment\": \"My Attachment\",\n \"buttons\": [\n {\"title\": \"yes\", \"payload\": \"/yes\"},\n {\"title\": \"no\", \"payload\": \"/no\"}]\n }\n }\n formatted = online.format_bot_output(message)\n assert formatted == (\"Hello!\\n\"\n \"Image: http://example.com/myimage.png\\n\"\n \"Attachment: My Attachment\\n\"\n \"1: yes (/yes)\\n\"\n \"2: no (/no)\")\n\n\ndef test_latest_user_message():\n tracker_dump = \"data/test_trackers/tracker_moodbot.json\"\n tracker_json = json.loads(utils.read_file(tracker_dump))\n\n m = online.latest_user_message(tracker_json.get(\"events\"))\n\n assert m is not None\n assert m[\"event\"] == \"user\"\n assert m[\"text\"] == \"/mood_great\"\n\n\ndef test_latest_user_message_on_no_events():\n m = online.latest_user_message([])\n\n assert m is None\n\n\ndef test_all_events_before_user_msg():\n tracker_dump = \"data/test_trackers/tracker_moodbot.json\"\n tracker_json = json.loads(utils.read_file(tracker_dump))\n evts = tracker_json.get(\"events\")\n\n m = online.all_events_before_latest_user_msg(evts)\n\n assert m is not None\n assert m == evts[:4]\n\n\ndef test_all_events_before_user_msg_on_no_events():\n assert online.all_events_before_latest_user_msg([]) == []\n\n\ndef test_print_history(mock_endpoint):\n tracker_dump = utils.read_file(\n \"data/test_trackers/tracker_moodbot.json\")\n\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/tracker'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.GET, url, body=tracker_dump)\n\n httpretty.enable()\n online._print_history(sender_id, mock_endpoint)\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n assert b == \"\"\n assert (httpretty.latest_requests[-1].path ==\n \"/conversations/{}/tracker?include_events=AFTER_RESTART\"\n \"\".format(sender_id))\n\n\ndef test_is_listening_for_messages(mock_endpoint):\n tracker_dump = utils.read_file(\n \"data/test_trackers/tracker_moodbot.json\")\n\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/tracker'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.GET, url, body=tracker_dump)\n\n httpretty.enable()\n is_listening = online.is_listening_for_message(sender_id, mock_endpoint)\n httpretty.disable()\n\n assert is_listening\n\n\ndef test_splitting_conversation_at_restarts():\n tracker_dump = \"data/test_trackers/tracker_moodbot.json\"\n evts = json.loads(utils.read_file(tracker_dump)).get(\"events\")\n evts_wo_restarts = evts[:]\n evts.insert(2, {\"event\": \"restart\"})\n evts.append({\"event\": \"restart\"})\n\n split = online._split_conversation_at_restarts(evts)\n assert len(split) == 2\n assert [e for s in split for e in s] == evts_wo_restarts\n assert len(split[0]) == 2\n assert len(split[0]) == 2\n\n\ndef test_as_md_message():\n parse_data = {\n \"text\": \"Hello there rasa.\",\n \"entities\": [{\"start\": 12,\n \"end\": 16,\n \"entity\": \"name\",\n \"value\": \"rasa\"}],\n \"intent\": 
{\"name\": \"greeting\", \"confidence\": 0.9}\n }\n md = online._as_md_message(parse_data)\n assert md == \"Hello there [rasa](name).\"\n\n\ndef test_validate_user_message():\n parse_data = {\n \"text\": \"Hello there rasa.\",\n \"parse_data\": {\n \"entities\": [{\"start\": 12,\n \"end\": 16,\n \"entity\": \"name\",\n \"value\": \"rasa\"}],\n \"intent\": {\"name\": \"greeting\", \"confidence\": 0.9}\n }\n }\n assert online._validate_user_regex(parse_data, [\"greeting\", \"goodbye\"])\n assert not online._validate_user_regex(parse_data, [\"goodbye\"])\n\n\ndef test_undo_latest_msg(mock_endpoint):\n tracker_dump = utils.read_file(\n \"data/test_trackers/tracker_moodbot.json\")\n tracker_json = json.loads(tracker_dump)\n evts = tracker_json.get(\"events\")\n\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/tracker'.format(\n mock_endpoint.url, sender_id)\n replace_url = '{}/conversations/{}/tracker/events'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.GET, url, body=tracker_dump)\n httpretty.register_uri(httpretty.PUT, replace_url)\n\n httpretty.enable()\n online._undo_latest(sender_id, mock_endpoint)\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n\n # this should be the events the online call send to the endpoint\n # these events should have the last utterance omitted\n replaced_evts = json.loads(b)\n assert len(replaced_evts) == 6\n assert replaced_evts == evts[:6]\n", "sub_path": "tests/test_online.py", "file_name": "test_online.py", "file_ext": "py", "file_size_in_byte": 6136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rasa_core.utils.EndpointConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 21, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 21, "usage_type": "name"}, {"api_name": "httpretty.httpretty.POST", "line_number": 21, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 23, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 23, "usage_type": "name"}, {"api_name": "rasa_core.training.online.send_message", "line_number": 24, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 24, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 25, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 25, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 27, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 27, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 36, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 40, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 40, "usage_type": "name"}, {"api_name": "httpretty.httpretty.POST", "line_number": 40, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 42, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 42, "usage_type": "name"}, {"api_name": "rasa_core.training.online.request_prediction", "line_number": 43, "usage_type": "call"}, {"api_name": 
"rasa_core.training.online", "line_number": 43, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 44, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 44, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 46, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 46, "usage_type": "name"}, {"api_name": "rasa_core.training.online.format_bot_output", "line_number": 61, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 61, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "rasa_core.utils.read_file", "line_number": 71, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 71, "usage_type": "name"}, {"api_name": "rasa_core.training.online.latest_user_message", "line_number": 73, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 73, "usage_type": "name"}, {"api_name": "rasa_core.training.online.latest_user_message", "line_number": 81, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 81, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 88, "usage_type": "call"}, {"api_name": "rasa_core.utils.read_file", "line_number": 88, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 88, "usage_type": "name"}, {"api_name": "rasa_core.training.online.all_events_before_latest_user_msg", "line_number": 91, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 91, "usage_type": "name"}, {"api_name": "rasa_core.training.online.all_events_before_latest_user_msg", "line_number": 98, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 98, "usage_type": "name"}, {"api_name": "rasa_core.utils.read_file", "line_number": 102, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 102, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 105, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 109, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 109, "usage_type": "name"}, {"api_name": "httpretty.httpretty.GET", "line_number": 109, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 111, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 111, "usage_type": "name"}, {"api_name": "rasa_core.training.online._print_history", "line_number": 112, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 112, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 113, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 113, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 115, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 115, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 117, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 117, "usage_type": "name"}, {"api_name": "rasa_core.utils.read_file", "line_number": 123, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 123, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 126, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 130, "usage_type": "call"}, {"api_name": "httpretty.httpretty", 
"line_number": 130, "usage_type": "name"}, {"api_name": "httpretty.httpretty.GET", "line_number": 130, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 132, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 132, "usage_type": "name"}, {"api_name": "rasa_core.training.online.is_listening_for_message", "line_number": 133, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 133, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 134, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 134, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 141, "usage_type": "call"}, {"api_name": "rasa_core.utils.read_file", "line_number": 141, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 141, "usage_type": "name"}, {"api_name": "rasa_core.training.online._split_conversation_at_restarts", "line_number": 146, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 146, "usage_type": "name"}, {"api_name": "rasa_core.training.online._as_md_message", "line_number": 162, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 162, "usage_type": "name"}, {"api_name": "rasa_core.training.online._validate_user_regex", "line_number": 177, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 177, "usage_type": "name"}, {"api_name": "rasa_core.training.online._validate_user_regex", "line_number": 178, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 178, "usage_type": "name"}, {"api_name": "rasa_core.utils.read_file", "line_number": 182, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 182, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 184, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 187, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 193, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 193, "usage_type": "name"}, {"api_name": "httpretty.httpretty.GET", "line_number": 193, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 194, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 194, "usage_type": "name"}, {"api_name": "httpretty.httpretty.PUT", "line_number": 194, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 196, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 196, "usage_type": "name"}, {"api_name": "rasa_core.training.online._undo_latest", "line_number": 197, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 197, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 198, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 198, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 200, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 200, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "5665522", "text": "import nltk\nfrom datetime import datetime\nfrom nltk_data.stop_words_data.stop_word_processing import get_stop_words\nfrom string import whitespace\nfrom collections import Counter\nfrom langdetect import detect\n\n\ndef avg(a, b):\n return a / b if b != 0 else 0\n\n\nclass 
SimpleMetricsCallback(object):\n sent_detector = nltk.tokenize.punkt.PunktSentenceTokenizer()\n\n def timedelta(self, creation_time):\n \"\"\" return days between the article publication\n and the dataset acquisition.\"\"\"\n creation = datetime.strptime(creation_time[:19], '%Y-%m-%d %H:%M:%S')\n now = datetime.utcnow()\n delta = now - creation\n return delta.days\n\n @staticmethod\n def n_symbols(text, ignore_spaces=False):\n if ignore_spaces:\n return len([c for c in text if c not in whitespace])\n else:\n return len(text)\n\n @staticmethod\n def n_syllables(words):\n count = 0\n vowels = 'aeiouy'\n\n for word in words:\n if word[0] in vowels:\n count += 1\n for i in range(1, len(word)):\n if word[i] in vowels and word[i-1] not in vowels:\n count += 1\n if word.endswith('e'):\n count -= 1\n\n return count\n\n def n_sentences(self, text):\n return len(self.sent_detector.tokenize(text.strip()))\n\n @staticmethod\n def most_common_words(words, count=5):\n words = Counter(words)\n most_common = words.most_common(count)\n if most_common:\n return ', '.join('\"{}\": {}'.format(k, v) for k, v in most_common)\n else:\n return '-'\n\n def __call__(self, text):\n if text == \"\":\n return (\n ('n_symbols', 0),\n ('n_symbols_no_space', 0),\n ('n_syllables', 0),\n ('n_sentences', 0),\n ('n_tokens_content', 0),\n ('n_unique_tokens', 0),\n ('n_non_stop_words', 0),\n ('n_non_stop_unique_tokens', 0),\n ('average_sentence_length', 0),\n ('average_token_length', 0),\n ('average_token_length_syllables', 0),\n ('most_common_non_stop_words', 0),\n )\n\n try:\n text_lang = detect(text)\n except Exception as e:\n text_lang = 'en'\n\n n_symbols = self.n_symbols(text)\n n_symbols_no_space = self.n_symbols(text, ignore_spaces=True)\n n_sentences = self.n_sentences(text)\n words = [w for w in nltk.tokenize.word_tokenize(text) if w.isalpha()]\n\n if text_lang == 'de':\n self.stop_words = get_stop_words('de')\n else:\n # english stopwords by default\n self.stop_words = get_stop_words('en')\n\n non_stop_words = [word for word in words if word not in self.stop_words]\n n_syllables = self.n_syllables(words)\n\n return (\n ('n_symbols', n_symbols),\n ('n_symbols_no_space', n_symbols_no_space),\n ('n_syllables', n_syllables),\n ('n_sentences', n_sentences),\n ('n_tokens_content', len(words)),\n ('n_unique_tokens', len(set(words))),\n ('n_non_stop_words', len(non_stop_words)),\n ('n_non_stop_unique_tokens', len(set(non_stop_words))),\n ('average_sentence_length', avg(len(words), n_sentences)),\n ('average_token_length', avg(sum([len(word) for word in words]), len(words))),\n ('average_token_length_syllables', avg(n_syllables, len(words))),\n ('most_common_non_stop_words', self.most_common_words(non_stop_words)),\n )\n", "sub_path": "parameters_extractor/metrics/simple.py", "file_name": "simple.py", "file_ext": "py", "file_size_in_byte": 3669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.tokenize.punkt.PunktSentenceTokenizer", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "string.whitespace", "line_number": 27, "usage_type": "name"}, 
{"api_name": "collections.Counter", "line_number": 52, "usage_type": "call"}, {"api_name": "langdetect.detect", "line_number": 77, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 84, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 84, "usage_type": "attribute"}, {"api_name": "nltk_data.stop_words_data.stop_word_processing.get_stop_words", "line_number": 87, "usage_type": "call"}, {"api_name": "nltk_data.stop_words_data.stop_word_processing.get_stop_words", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "297982540", "text": "\"\"\"\nThe `OWER Directory` contains the input files required for training the\n`OWER Classifier`. The `OWER Temp Directory` keeps intermediate files\nfor debugging purposes.\n\n**Structure**\n\n::\n\n ower/ # OWER Directory\n\n tmp/ # OWER Temp Directory\n\n ent_labels.txt # OWER Entity Labels TXT\n rel_labels.txt # OWER Relation Labels TXT\n\n classes.tsv # OWER Classes TSV\n\n test.tsv # OWER Test Samples TSV\n train.tsv # OWER Train Samples TSV\n valid.tsv # OWER Valid Samples TSV\n\n|\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nfrom spacy.lang.en import English\nfrom torchtext.data import TabularDataset, Field\nfrom torchtext.vocab import Vocab\n\nfrom dao.base_dir import BaseDir\nfrom dao.ower.classes_tsv import ClassesTsv\nfrom dao.ower.samples_tsv import SamplesTsv\nfrom dao.ower.tmp.tmp_dir import TmpDir\nfrom dao.ryn.split.labels_txt import LabelsTxt\n\n\n@dataclass\nclass Sample:\n ent: int\n classes: List[int]\n sents: List[List[int]]\n\n def __iter__(self):\n return iter((self.ent, self.classes, self.sents))\n\n\nclass OwerDir(BaseDir):\n tmp_dir: TmpDir\n\n ent_labels_txt: LabelsTxt\n rel_labels_txt: LabelsTxt\n\n classes_tsv: ClassesTsv\n\n train_samples_tsv: SamplesTsv\n valid_samples_tsv: SamplesTsv\n test_samples_tsv: SamplesTsv\n\n def __init__(self, path: Path):\n super().__init__(path)\n\n self.tmp_dir = TmpDir(path.joinpath('tmp'))\n\n self.ent_labels_txt = LabelsTxt(path.joinpath('ent_labels.txt'))\n self.rel_labels_txt = LabelsTxt(path.joinpath('rel_labels.txt'))\n\n self.classes_tsv = ClassesTsv(path.joinpath('classes.tsv'))\n\n self.train_samples_tsv = SamplesTsv(path.joinpath('train.tsv'))\n self.valid_samples_tsv = SamplesTsv(path.joinpath('valid.tsv'))\n self.test_samples_tsv = SamplesTsv(path.joinpath('test.tsv'))\n\n def check(self) -> None:\n super().check()\n\n self.tmp_dir.check()\n\n self.ent_labels_txt.check()\n self.rel_labels_txt.check()\n\n self.classes_tsv.check()\n\n self.train_samples_tsv.check()\n self.valid_samples_tsv.check()\n self.test_samples_tsv.check()\n\n def create(self) -> None:\n super().create()\n\n self.tmp_dir.create()\n\n def read_datasets(self, class_count: int, sent_count: int, vectors=None) \\\n -> Tuple[List[Sample], List[Sample], List[Sample], Vocab]:\n \"\"\"\n :param vectors: Pre-trained word embeddings\n \"\"\"\n\n nlp = English()\n spacy_tokenizer = nlp.tokenizer\n\n def tokenize(text: str) -> List[str]:\n return [token.text for token in spacy_tokenizer(text.strip())]\n\n ent_field = Field(sequential=False, use_vocab=False)\n ent_label_field = Field()\n class_field = Field(sequential=False, use_vocab=False)\n sent_field = Field(sequential=True, use_vocab=True, tokenize=tokenize, lower=True)\n\n ent_col = ('ent', ent_field)\n ent_label_col = ('ent_label', ent_label_field)\n class_cols = [(f'class_{i}', class_field) for i in range(class_count)]\n sent_cols = [(f'sent_{i}', 
sent_field) for i in range(sent_count)]\n\n cols = [ent_col, ent_label_col] + class_cols + sent_cols\n\n train_tab_set = TabularDataset(str(self.train_samples_tsv.path), 'tsv', cols, skip_header=True)\n valid_tab_set = TabularDataset(str(self.valid_samples_tsv.path), 'tsv', cols, skip_header=True)\n test_tab_set = TabularDataset(str(self.test_samples_tsv.path), 'tsv', cols, skip_header=True)\n\n #\n # Build vocab on train data\n #\n\n sent_field.build_vocab(train_tab_set, vectors=vectors)\n vocab = sent_field.vocab\n\n #\n # Transform TabularDataset -> List[Sample]\n #\n\n def transform(raw_set: TabularDataset) -> List[Sample]:\n return [Sample(\n int(getattr(row, 'ent')),\n [int(getattr(row, f'class_{i}')) for i in range(class_count)],\n [[vocab[token] for token in getattr(row, f'sent_{i}')] for i in range(sent_count)]\n ) for row in raw_set]\n\n train_set = transform(train_tab_set)\n valid_set = transform(valid_tab_set)\n test_set = transform(test_tab_set)\n\n return train_set, valid_set, test_set, vocab\n", "sub_path": "src/dao/ower/ower_dir.py", "file_name": "ower_dir.py", "file_ext": "py", "file_size_in_byte": 4418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 41, "usage_type": "name"}, {"api_name": "dao.base_dir.BaseDir", "line_number": 51, "usage_type": "name"}, {"api_name": "dao.ower.tmp.tmp_dir.TmpDir", "line_number": 52, "usage_type": "name"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 54, "usage_type": "name"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 55, "usage_type": "name"}, {"api_name": "dao.ower.classes_tsv.ClassesTsv", "line_number": 57, "usage_type": "name"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 59, "usage_type": "name"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 60, "usage_type": "name"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 61, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 63, "usage_type": "name"}, {"api_name": "dao.ower.tmp.tmp_dir.TmpDir", "line_number": 66, "usage_type": "call"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 68, "usage_type": "call"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 69, "usage_type": "call"}, {"api_name": "dao.ower.classes_tsv.ClassesTsv", "line_number": 71, "usage_type": "call"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 73, "usage_type": "call"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 74, "usage_type": "call"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 75, "usage_type": "call"}, {"api_name": "spacy.lang.en.English", "line_number": 102, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 105, "usage_type": "name"}, {"api_name": "torchtext.data.Field", "line_number": 108, "usage_type": "call"}, {"api_name": "torchtext.data.Field", "line_number": 109, "usage_type": "call"}, {"api_name": "torchtext.data.Field", "line_number": 110, "usage_type": "call"}, {"api_name": "torchtext.data.Field", "line_number": 111, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 120, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 121, "usage_type": "call"}, {"api_name": 
"torchtext.data.TabularDataset", "line_number": 122, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "torchtext.vocab.Vocab", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "519452795", "text": "\"\"\"\n/**************************************************************\n* Name : definitions.py\n* Author : Tom Sorteberg\n* Created : 12/08/2020\n* Course : CIS 152 Data Structures\n* Version : 1.0\n* OS : Windows 10 Professional 1909\n* Copyright : This is my own original work based on\n* specifications issued by our instructor\n* Description : This class file defines both the Schedule\n and Group wrapper data types for the\n Scheduling Application.\n* Academic Honesty: I attest that this is my original work.\n* I have not used unauthorized source code, either modified or\n* unmodified. I have not given other fellow student(s) access to\n* my program.\n***************************************************************/\n\"\"\"\nfrom constants import constants\nfrom modules.validate import check_ticket\nimport re\nfrom modules.validate import update_csv\nimport csv\nimport ast\nimport os\nimport shutil\n\n\"\"\" Class Schedule\"\"\"\n\n\nclass Schedule(object):\n \"\"\"\n This class represents a queue data structure with\n associated class and static functions. The Schedule\n queue is then populated with Group class nodes.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor.\n \"\"\"\n self._queue = []\n self._group_number = 1\n\n def insert(self, entries):\n \"\"\"\n Inserts a group object\n :param entries: Required list.\n :return: No return.\n \"\"\"\n\n # Local variable declaration and initialization.\n character_set = set(\"0123456789GSPR \")\n number_set = set(\"0123456789\")\n email_set = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n length = len(entries)\n priority = self.priority(entries)\n inserted = False\n no_email = False\n min_age = False\n max_age = False\n checked = []\n duplicate = False\n\n # Input validation.\n for entry in entries:\n # Validate ticket numbers.\n if len(entry[0]) != constants.TICKET_LEN \\\n or not character_set.issuperset(entry[0]) \\\n or not check_ticket(entry[0]):\n # Raise exception.\n raise ValueError(\"Invalid value for ticket number parameter.\")\n # Validate age.\n elif not number_set.issuperset((entry[1])) or int(entry[1]) < constants.MIN_AGE:\n # Raise exception.\n raise ValueError(\"Invalid value for age parameter.\")\n elif not number_set.issuperset((entry[2])) or int(entry[2]) < constants.MIN_HEIGHT:\n # Raise exception.\n raise ValueError(\"Invalid value for height parameter.\")\n # Validate email.\n elif entry[3] != \"\" and not re.search(email_set, entry[3]):\n # Raise exception.\n raise ValueError(\"Invalid value for email parameter.\")\n # If there are no email addresses, then set no_email variable to True.\n elif entry[3] != \"\":\n no_email = True\n\n # Check for duplicates and age verification.\n for entry in entries:\n if entry[0] is not None and entry[0] in checked:\n duplicate = True\n checked.append(entry[0])\n # If age field is not empty and is an integer.\n if entry[1] != \"\" and number_set.issuperset(entry[1]):\n # If the age field is less than or equal to 7,\n # set min_age iteration variable to true.\n if int(entry[1]) <= 
constants.MIN_ACCP:\n min_age = True\n # If the age field is greater than than or equal to 14,\n # set max_age iteration variable to true.\n if int(entry[1]) >= constants.MAX_ACCP:\n max_age = True\n\n # If no email address is provided.\n if not no_email:\n # Raise exception.\n raise ValueError(\"No value provided for email.\")\n # If accompany requirements are not met.\n elif min_age is True and max_age is not True:\n raise ValueError(\"Accompany requirements not met.\")\n # If there are duplicates.\n elif duplicate:\n # Raise exception.\n raise ValueError(\"Duplicate ticket exists\")\n\n # If the group is full or the queue is empty.\n if length == constants.MAX_GROUP or self.is_empty():\n # Append group object to queue.\n self._queue.append(Group(entries, priority, self._group_number))\n # Increment group number.\n self._group_number += 1\n # Set inserted to True.\n inserted = True\n # Else if group is less than 4, find a group with same priority\n # with room for additional members and if available, insert member\n # information.\n else:\n for group in self._queue:\n if (constants.MAX_GROUP - group.size()) >= length \\\n and priority == group.get_priority():\n inserted = True\n for entry in entries:\n group.update(entry)\n # If queue is not empty and no suitable group is found, create\n # a new group.\n if not inserted:\n self._queue.append(Group(entries, priority, self._group_number))\n # Increment group number.\n self._group_number += 1\n # Remove ticket entries from valid.csv to prevent additional\n # registration.\n update_csv(entries)\n # Write entry to backup.csv file in case of recovery.\n self.backup_csv()\n\n def remove(self):\n \"\"\"\n Function that removes a group from the queue based on group number.\n :return: Returns a Group object.\n \"\"\"\n # Return statement.\n return self._queue.pop(0)\n\n def size(self):\n \"\"\"\n Function that returns the size of the queue.\n :return: Returns an integer.\n \"\"\"\n # Return statement.\n return len(self._queue)\n\n def is_empty(self):\n \"\"\"\n Function that returns True if the queue is empty.\n :return: Returns a boolean.\n \"\"\"\n # Return statement.\n return len(self._queue) == 0\n\n def search(self, value):\n \"\"\"\n Function that performs a search based on group number.\n Returns true if found.\n :param value: Required integer.\n :return: Returns a boolean.\n \"\"\"\n # Local variable declaration and initialization.\n return_statement = False\n # Input Validation.\n if isinstance(value, int):\n # For loop to iterate through queue.\n # If value is found, return True.\n for group in self._queue:\n if group.get_group_num() == value:\n return_statement = True\n else:\n raise ValueError(\"Parameter value must be an integer.\")\n\n # Return statement.\n return return_statement\n\n def display_group(self, value):\n \"\"\"\n Function that displays group information based on group number.\n :param value: Required integer.\n :return: Returns a string.\n \"\"\"\n # Local variable declaration and initialization.\n return_statement = \"Group not found.\"\n member_statement = \"\"\n # Input Validation.\n if isinstance(value, int):\n # For loop to search for group value.\n for group in self._queue:\n # If group found, return group information.\n if group.get_group_num() == value:\n members = group.get_members()\n for member in members:\n member_statement = member_statement \\\n + \"\\nTicket#: \" + member[0] \\\n + \"\\nAge: \" + member[1] \\\n + \"\\nHeight: \" + member[2] \\\n + \"\\nemail: \" + member[3] + \"\\n\"\n\n return_statement 
= \"Group#: \" + str(group.get_group_num()) + \\\n \", Priority: \" + group.get_priority() + \\\n \", \\nMembers: \\n\" + member_statement\n # Return statement.\n return return_statement\n else:\n raise ValueError(\"Parameter value must be an integer.\")\n\n def import_csv(self):\n \"\"\"\n Function that imports data from backup .csv and rebuilds\n priority queue.\n :return: No return.\n \"\"\"\n # Local variable declaration and initialization.\n # Try except clause to check if file is available.\n try:\n with open('../backup/backup.csv', mode='r', newline=\"\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for line in csv_reader:\n # Convert string representation of list to type list.\n temp_string = line[2]\n temp_list = ast.literal_eval(temp_string)\n self._queue.append(Group(temp_list, line[1], int(line[0])))\n self._group_number += 1\n except FileNotFoundError:\n # Raise exception.\n raise FileNotFoundError(\"Backup.csv file cannot be found.\")\n\n def backup_csv(self):\n \"\"\"\n Function that exports data to a .csv for backup.\n :return: No return.\n \"\"\"\n\n # Overwrite export.csv if it exists.\n with open('../backup/backup.csv', mode='w', newline=\"\") as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n for group in self._queue:\n csv_writer.writerow([group.get_group_num(), group.get_priority(), group.get_members()])\n # Close open object.\n csv_file.close()\n\n def selection_sort(self):\n \"\"\"\n Function that performs a selection sort algorithm on the queue based\n on group priority.\n :return: No return.\n \"\"\"\n\n def swap(min_value, index_value):\n \"\"\"\n Helper function that exchanges queue positions from minimum value\n and index value.\n :param min_value: Required integer.\n :param index_value: Required integer.\n :return: No return.\n \"\"\"\n temp = self._queue[min_value]\n self._queue[min_value] = self._queue[index_value]\n self._queue[index_value] = temp\n\n # Local variable declaration and initialization.\n index = 0\n count = 0\n # While the index is less than the total size of the queue.\n while index < self.size() - 1:\n # Set the minimum index value to index.\n min_index = index\n # Set the probe index to index value plus one.\n probe = index + 1\n # While the probe index is less than the total size of the queue.\n while probe < self.size():\n # If probe index priority is less than minimum index priority.\n if self._queue[probe].get_priority() < self._queue[min_index].get_priority():\n # Set minimum index to probe index.\n min_index = probe\n # Increment counters.\n probe += 1\n count += 1\n # If minimum index value does not equal initial index value.\n if min_index != index:\n # Function call to swap group positions in queue if value is found.\n swap(min_index, index)\n # Increment counters.\n index += 1\n count += 1\n\n def export_csv(self):\n \"\"\"\n Function that exports data to a .csv for backup.\n :return: No return.\n \"\"\"\n # Function call for selection sort.\n self.selection_sort()\n\n # If previous export exists, backup to archive.\n if os.path.exists(\"../export/export.csv\"):\n shutil.copyfile(\"../export/export.csv\", \"../archive/export.csv\")\n # Overwrite export.csv if it exists.\n with open('../export/export.csv', mode='w', newline=\"\") as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n\n for group in self._queue:\n temp_list = group.get_members()\n csv_writer.writerow([\"Group#\", \"Priority\"])\n csv_writer.writerow([group.get_group_num(), group.get_priority(), \"Ticket#\", \"Age\", 
\"Height\", \"Email\"])\n index = 0\n for _ in temp_list:\n csv_writer.writerow([\"\", \"\", temp_list[index][0], temp_list[index][1], temp_list[index][2], temp_list[index][3]])\n index += 1\n\n # Close open object.\n csv_file.close()\n\n # Copy backup file to archive.\n shutil.copyfile(\"../backup/backup.csv\", \"../archive/backup.csv\")\n\n # Delete backup file.\n os.remove(\"../backup/backup.csv\")\n\n @ staticmethod\n def priority(entries):\n \"\"\"\n Static function that determines group priority.\n :param entries: Required list.\n :return: Returns a string.\n \"\"\"\n # Local variable declaration and initialization.\n priority = None\n # Input validation.\n if isinstance(entries, list):\n # For loop and selection logic to determine group priority.\n for entry in entries:\n if entry[0] != \"\" and entry[0][0:2] == \"GP\":\n if priority is None:\n priority = \"A\"\n elif priority >= \"A\":\n priority = \"A\"\n elif entry[0] != \"\" and entry[0][0:2] == \"GS\":\n if priority is None:\n priority = \"B\"\n elif priority >= \"B\":\n priority = \"B\"\n elif entry[0] != \"\" and entry[0][0:2] == \"PR\":\n if priority is None:\n priority = \"C\"\n elif priority >= \"C\":\n priority = \"C\"\n elif entry[0] != \"\" and entry[0][0:2] == \"GR\":\n if priority is None:\n priority = \"D\"\n elif priority >= \"D\":\n priority = \"D\"\n else:\n # Raise exception.\n raise ValueError(\"Parameter must be type list.\")\n\n # Return statement.\n return priority\n\n\n\"\"\" Class Group \"\"\"\n\n\nclass Group(object):\n\n def __init__(self, entries, priority, group_num):\n \"\"\"\n Default constructor.\n :param entries: Required list.\n :param priority: Required String.\n :param group_num: Required integer.\n \"\"\"\n # Input validation.\n if isinstance(entries, list) \\\n and isinstance(priority, str) \\\n and isinstance(group_num, int) \\\n and len(entries) <= 4:\n self._group_num = group_num\n\n # Member variable declaration and initialization.\n self._members = entries\n self._priority = priority\n else:\n raise ValueError(\"Invalid parameter.\")\n\n def size(self):\n \"\"\"\n Function that returns the size of group.\n :return: Returns an integer.\n \"\"\"\n # Return statement.\n return len(self._members)\n\n def update(self, visitor):\n \"\"\"\n Function that appends members to groups.\n :param visitor: Required list.\n :return: No return.\n \"\"\"\n # Input validation.\n if isinstance(visitor, list) and self.size() != constants.MAX_GROUP:\n self._members.append(visitor)\n else:\n raise ValueError(\"Parameter must be of type list.\")\n\n def get_priority(self):\n \"\"\"\n Function that returns group priority.\n :return: Returns a string.\n \"\"\"\n # Return statement.\n return self._priority\n\n def get_members(self):\n \"\"\"\n Function that returns group member information.\n :return: Returns a list.\n \"\"\"\n # Return statement.\n return self._members\n\n def get_group_num(self):\n \"\"\"\n Function that returns the group number.\n :return: Returns an int.\n \"\"\"\n # Return statement.\n return self._group_num\n", "sub_path": "definitions/definitions.py", "file_name": "definitions.py", "file_ext": "py", "file_size_in_byte": 16424, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "constants.constants.TICKET_LEN", "line_number": 69, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 69, "usage_type": "name"}, {"api_name": "modules.validate.check_ticket", "line_number": 71, "usage_type": "call"}, 
{"api_name": "constants.constants.MIN_AGE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 75, "usage_type": "name"}, {"api_name": "constants.constants.MIN_HEIGHT", "line_number": 78, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 78, "usage_type": "name"}, {"api_name": "re.search", "line_number": 82, "usage_type": "call"}, {"api_name": "constants.constants.MIN_ACCP", "line_number": 98, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 98, "usage_type": "name"}, {"api_name": "constants.constants.MAX_ACCP", "line_number": 102, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 102, "usage_type": "name"}, {"api_name": "constants.constants.MAX_GROUP", "line_number": 118, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 118, "usage_type": "name"}, {"api_name": "constants.constants.MAX_GROUP", "line_number": 130, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 130, "usage_type": "name"}, {"api_name": "modules.validate.update_csv", "line_number": 143, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 234, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 238, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 314, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 317, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 332, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 335, "usage_type": "call"}, {"api_name": "constants.constants.MAX_GROUP", "line_number": 418, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 418, "usage_type": "name"}]} +{"seq_id": "425677528", "text": "#!/usr/bin/env python3\n\nimport sys\nimport pandas as pd\nimport numpy as np\nimport glob\nfrom functools import reduce\n\npath = \"/study/midusref/DATA/Eyetracking/david_analysis/data_processed/[0-9][0-9][0-9]/*_data_type_compiled.csv\"\n\nfiles = sorted(glob.glob(path))\n\nprint (len(files))\n\nappended_data = []\n\n# file_name = 'df_'\n# full_name_list = []\n# file_counter = 1\n\nfor file in files:\n\n\tdata = pd.read_csv(file)\n\n\tdata_df = data[['iaps_number', 'percent_valid']]\n\tdata_df['iaps_number'] = data_df['iaps_number'].astype(str)\n\t#subject_df = file[['subject_number']]\n\tsubject_number = file.split('/')[7]\n\tdata_df = data_df.rename(columns={'percent_valid':subject_number})\n\t\n\t#data_transposed = data_df.T\n\t#print (data_transposed)\n\n\t#print (data_df)\n\n\tappended_data.append(data_df)\n\n\t# file_counter = str(file_counter)\n\t# full_name = (file_name + file_counter)\n\t# full_name_list.append(full_name)\n\t# file_counter = int(file_counter)\n\t# file_counter += 1\n\n# print (len(full_name_list))\n\n# transposed_data = []\n\n# for data in appended_data:\n# \tdata = data.set_index('iaps_number').T\n# \ttransposed_data.append(data)\n\ndf_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10,df_11, df_12, df_13, df_14, df_15, df_16, df_17, df_18, df_19, df_20, df_21, df_22, df_23, df_24, df_25, df_26, df_27, df_28, df_29, df_30, df_31, df_32, df_33, df_34, df_35, df_36, df_37, df_38, df_39, df_40, df_41, df_42, df_43, df_44, df_45, df_46, df_47, df_48, 
df_49, df_50, df_51, df_52, df_53, df_54, df_55, df_56, df_57, df_58, df_59, df_60, df_61, df_62, df_63, df_64, df_65, df_66, df_67, df_68, df_69, df_70, df_71, df_72, df_73, df_74, df_75, df_76, df_77, df_78, df_79, df_80, df_81, df_82, df_83, df_84, df_85, df_86, df_87, df_88, df_89, df_90, df_91, df_92, df_93, df_94, df_95, df_96, df_97, df_98, df_99, df_100, df_101, df_102, df_103, df_104, df_105, df_106, df_107, df_108, df_109, df_110, df_111, df_112, df_113, df_114, df_115, df_116, df_117, df_118, df_119, df_120, df_121, df_122, df_123, df_124, df_125 = appended_data\n\ndata_list = [df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10,df_11, df_12, df_13, df_14, df_15, df_16, df_17, df_18, df_19, df_20, df_21, df_22, df_23, df_24, df_25, df_26, df_27, df_28, df_29, df_30, df_31, df_32, df_33, df_34, df_35, df_36, df_37, df_38, df_39, df_40, df_41, df_42, df_43, df_44, df_45, df_46, df_47, df_48, df_49, df_50, df_51, df_52, df_53, df_54, df_55, df_56, df_57, df_58, df_59, df_60, df_61, df_62, df_63, df_64, df_65, df_66, df_67, df_68, df_69, df_70, df_71, df_72, df_73, df_74, df_75, df_76, df_77, df_78, df_79, df_80, df_81, df_82, df_83, df_84, df_85, df_86, df_87, df_88, df_89, df_90, df_91, df_92, df_93, df_94, df_95, df_96, df_97, df_98, df_99, df_100, df_101, df_102, df_103, df_104, df_105, df_106, df_107, df_108, df_109, df_110, df_111, df_112, df_113, df_114, df_115, df_116, df_117, df_118, df_119, df_120, df_121, df_122, df_123, df_124, df_125]\n\n\n# cols = list(transposed_data[0].columns)\n# cols.append(\"iaps_number\")\n# print (cols)\n\n\nfrom functools import reduce\n\nfinal_df = reduce(lambda x,y: pd.merge(x,y, on=\"iaps_number\", how='outer'), data_list)\nfinal_df.rename(columns ={'iaps_number':'subject_number'}, inplace=True)\nprint (final_df)\n\ndata_transposed = final_df.T\nprint (data_transposed)\n\ndata_transposed.to_csv(\"/study/midusref/DATA/Eyetracking/david_analysis/QA/validity_by_iaps.csv\", header=False, na_rep='NA')\n\n\n", "sub_path": "4_compute_stimuli_validity.py", "file_name": "4_compute_stimuli_validity.py", "file_ext": "py", "file_size_in_byte": 3399, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "glob.glob", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "457601554", "text": "\r\n\r\nfrom jira import JIRA\r\n#import json\r\n#from dateutil import parser\r\n#import datetime\r\nimport ccilib as cci\r\n#import getpass\r\n#import sys\r\n#import gspread\r\n#from oauth2client.service_account import ServiceAccountCredentials\r\nimport re\r\n\r\n\r\nclass failed_content:\r\n \r\n def __init__(self, title, cci_file):\r\n self.title = title;\r\n self.cc = cci.cci(cci_file);\r\n self.items = self.cc.get_unique_cars_cgi_all()\r\n self.cla_number = self.cc.get_cla_count();\r\n self.failed_items = set();\r\n self.failed_cqc_items =set();\r\n self.failed_cqa_items = set();\r\n self.issues = set()\r\n self.activities = set(self.cc.get_cla_numbers(self.items));\r\n self.extra_failed_cla = set()\r\n\r\n \r\n def failed_cla(self):\r\n return set(self.cc.get_cla_numbers(self.failed_items));\r\n \r\n def all_failed_cla(self):\r\n return self.failed_cla() | self.extra_failed_cla\r\n \r\n def failed_cqc_cla(self):\r\n return 
set(self.cc.get_cla_numbers(self.failed_cqc_items));\r\n \r\n def failed_cqa_cla(self):\r\n return set(self.cc.get_cla_numbers(self.failed_cqa_items));\r\n \r\n def cqc_items_failure_rate(self):\r\n fr = 'N/A'\r\n if self.cla_number !=0:\r\n fr = len(self.failed_cqc_items)/self.cla_number\r\n return fr;\r\n \r\n def cqa_items_failure_rate(self):\r\n fr = 'N/A'\r\n if self.cla_number !=0:\r\n fr = len(self.failed_cqa_items)/self.cla_number\r\n return fr;\r\n \r\n def items_failure_rate(self):\r\n fr = 'N/A'\r\n if self.cla_number !=0:\r\n fr = len(self.failed_items)/self.cla_number\r\n return fr;\r\n \r\ndef issue_has_cgi(issue, item):\r\n des = issue.fields.description;\r\n ex = item + \"[^>]*<\"\r\n findings = re.findall(ex, des)\r\n return len(findings)\r\n\r\n\r\ndef item_failed(CGI):\r\n \r\n failed = 0;\r\n jac = JIRA('https://jira.cengage.com');\r\n query ='project = MTQA AND text ~ ' + CGI;\r\n issues = jac.search_issues(query);\r\n for issue in issues:\r\n if str(issue.fields.status) in ('Open','In Progress', 'Reopened') or str(issue.fields.resolution)=='Fixed' :\r\n failed = 1;\r\n return failed;\r\n\r\ndef find_all_issues(query):\r\n# query ='project = MTQA AND issuetype = Bug AND labels = back_half AND labels in (WLCQC)';\r\n jac = JIRA('https://jira.cengage.com');\r\n bunch = 50;\r\n issues = [];\r\n while bunch == 50:\r\n print('1')\r\n iss = jac.search_issues(query, startAt = len(issues) , maxResults = 50);\r\n bunch = len(list(iss))\r\n issues = issues + list(iss);\r\n print('2')\r\n return issues;\r\n\r\nif __name__ == \"__main__\":\r\n \r\n \r\n titles = {'Conectados':[1]}\r\n \r\n query12 = 'project = MTQA AND issuetype = Bug AND labels = WL_2020 AND labels in (WLCQC, WLCQA) AND resolution in (Unresolved, Fixed) and component in (Content) and priority in (\"High/Critical\", \"Blocker/Showstopper\") AND labels = Conectados and bucket = \"Phase 3\"'\r\n query34 = 'project = MTQA AND issuetype = Bug AND labels = WL_2020 AND labels in (WLCQC, WLCQA) AND resolution in (Unresolved) and component in (Content) and priority in (\"Medium/Major\", \"Low/Minor\")'\r\n \r\n issues = find_all_issues(query12)\r\n print('issues = ', len(issues), '\\n');\r\n\r\n for key in titles: \r\n title = key;\r\n cci_file ='C:\\\\Users\\\\gyesayan\\\\CARS\\\\CCI\\\\' + title + '_CCI.csv';\r\n rut = failed_content(title, cci_file)\r\n \r\n for item in rut.items:\r\n for issue in issues:\r\n\r\n if issue_has_cgi(issue, item) and title in issue.fields.labels:\r\n# print(item, issue.key, issue_has_cgi(issue, item))\r\n if \"WLCQA\" in issue.fields.labels:\r\n rut.failed_cqa_items.add(item);\r\n if \"WLCQC\" in issue.fields.labels:\r\n rut.failed_cqc_items.add(item);\r\n rut.failed_items.add(item);\r\n rut.issues.add(issue.key);\r\n \r\n for cla in rut.activities:\r\n for issue in issues:\r\n if (cla in issue.raw['fields'][\"description\"] or cla in issue.fields.summary) and title in issue.fields.labels:\r\n# print(cla, issue)\r\n rut.extra_failed_cla.add(cla);\r\n rut.issues.add(issue.key);\r\n\r\n print(title)\r\n\r\n items = len(set(rut.items))\r\n failed_items = len(set(rut.failed_items))\r\n print(\"Overall unique items: \",items)\r\n print(\"failed items: \",failed_items)\r\n print(\"item failure rate: \",failed_items/items)\r\n print(\"failed CLA: \",len(rut.all_failed_cla()))\r\n\r\n\r\n\r\n \r\n", "sub_path": "failure_rate2020.py", "file_name": "failure_rate2020.py", "file_ext": "py", "file_size_in_byte": 4724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "57", "api": [{"api_name": "ccilib.cci", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 63, "usage_type": "call"}, {"api_name": "jira.JIRA", "line_number": 70, "usage_type": "call"}, {"api_name": "jira.JIRA", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "278736257", "text": "import pandas as pd\nimport sys\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\ndef get_stats(stat='BaseSpirit'):\n data = pd.read_csv('data/recipes.csv')\n data.fillna('nothing', inplace=True)\n # desired_cols = ['Glass','Occasions','Flavor','BaseSpirit','CocktailType','Preparation','Served','Strength','Difficulty','Hours','Theme','Brands','Garnish']\n stats = defaultdict(int)\n \n for i, row in data.iterrows():\n for col in data.columns:\n if col == stat:\n names = row[col].split(\";\")\n for name in names:\n if name not in stats:\n stats[name] = 1\n else:\n stats[name] += 1\n\t \n for i in sorted(stats, key = stats.get, reverse = True):\n print(i + \": \" + str(stats[i]))\n print(\"Missing:\" + str(stats['nothing']))\n \n plt.barh(*zip(*stats.items()))\n plt.show()\n\t\t\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n get_stats(sys.argv[1])\n else:\n get_stats()", "sub_path": "stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "587507092", "text": "from flask import json\nimport requests\nfrom requests.api import request\n\nURL = \"http://127.0.0.1:5000\"\nBACKUP_URL = \"https://retro-video-store-api.herokuapp.com\"\n\n\n'''\n =============================================\n HELPER PRINTS\n =============================================\n'''\n\ndef bar_break():\n print(\"\\n==========================\\n\")\n\ndef list_options_ee():\n options = {\n \"1\" : \"Add Video to Store Stock\",\n \"2\" : \"Edit Video Info\",\n \"3\" : \"Remove Video From Inventory\",\n \"4\" : \"View Current Store Stock\",\n \"5\" : \"View Video Info\",\n \"6\" : \"Add New Customer\",\n \"7\" : \"Edit Existing Customer\",\n \"8\" : \"Delete Existing Customer\",\n \"9\" : \"View Existing Customer Records\",\n \"10\" : \"View All Existing Customers\",\n \"11\" : \"Check Out\",\n \"12\" : \"Check In\"\n }\n\n bar_break()\n print(\"Here are your available options:\\n\")\n for choice in options:\n print(f\"Option {choice}. 
{options[choice]}\")\n\n bar_break()\n\n return options\n\ndef list_options_cust():\n options = {\n\n }\n pass\n\n'''\n =============================================\n EMPLOYEE OPTION FUNCTIONS\n =============================================\n'''\n\ndef add_video():\n print(\"Enter video info below:\")\n request_body = {}\n request_body[\"title\"] = input(\"Title: \")\n request_body[\"release_date\"] = input(\"Release date: \")\n request_body[\"total_inventory\"] = input(\"Total inventory: \")\n\n response = requests.post(URL +\"/videos\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef edit_video():\n print(\"Enter updated video info below:\")\n request_body = {}\n video_id = input(\"Video ID: \")\n request_body[\"title\"] = input(\"Title: \")\n request_body[\"release_date\"] = input(\"Release date: \")\n request_body[\"total_inventory\"] = input(\"Total inventory: \")\n\n response = requests.put(URL +\"/videos/\" +video_id, json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef remove_video():\n print(\"DELETE VIDEO - THIS ACTION CANNOT BE UNDONE\")\n if input(\"Are you sure? Y/N \") != \"Y\":\n print(\"ACTION CANCELLED\")\n return\n \n video_id = input(\"Video ID: \")\n response = requests.delete(URL +\"/videos/\" +video_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef view_video_stock():\n print(\"All Videos in Store Stock:\")\n response = requests.get(URL +\"/videos\")\n print(json.dumps(response.json(), indent=2))\n return\n\ndef view_single_video():\n print(\"Video Info Request:\")\n video_id = input(\"Video ID: \")\n response = requests.get(URL +\"/videos/\" +video_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef add_customer():\n print(\"Enter customer info below:\")\n request_body = {}\n request_body[\"name\"] = input(\"Name: \")\n request_body[\"phone\"] = input(\"Phone number: \")\n request_body[\"postal_code\"] = input(\"Postal code: \")\n\n response = requests.post(URL +\"/customers\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef edit_customer():\n print(\"Enter updated customer info below:\")\n request_body = {}\n customer_id = input(\"Customer ID: \")\n request_body[\"name\"] = input(\"Name: \")\n request_body[\"phone\"] = input(\"Phone number: \")\n request_body[\"postal_code\"] = input(\"Postal code: \")\n \n response = requests.put(URL +\"/customers/\" +customer_id, json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef delete_customer():\n print(\"DELETE CUSTOMER - THIS ACTION CANNOT BE UNDONE\")\n if input(\"Are you sure? 
Y/N \") != \"Y\":\n print(\"ACTION CANCELLED\")\n return\n \n customer_id = input(\"Customer ID: \")\n response = requests.delete(URL +\"/customers/\" +customer_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef view_customer():\n print(\"Customer Info Request:\")\n customer_id = input(\"Customer ID: \")\n response = requests.get(URL +\"/customers/\" +customer_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef view_all_customers():\n print(\"All Active Customer Accounts:\")\n response = requests.get(URL +\"/customers\")\n print(json.dumps(response.json(), indent=2))\n return\n\ndef checking_out():\n print(\"Check Out a Video:\")\n request_body = {}\n request_body[\"customer_id\"] = int(input(\"Customer ID: \"))\n request_body[\"video_id\"] = int(input(\"Video ID: \"))\n\n response = requests.post(URL +\"/rentals/check-out\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef checking_in():\n print(\"Check In a Video:\")\n request_body = {}\n request_body[\"customer_id\"] = int(input(\"Customer ID: \"))\n request_body[\"video_id\"] = int(input(\"Video ID: \"))\n\n response = requests.post(URL +\"/rentals/check-in\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\n'''\n =============================================\n CUSTOMER OPTION FUNCTIONS\n =============================================\n'''\n\ndef find_videos_by():\n print(\"I'm sorry, that feature is not yet available in your area\")\n return\n\ndef check_current_rentals():\n print(\"I'm sorry, that feature is not yet available in your area\")\n return\n\n'''\n =============================================\n MAIN\n =============================================\n'''\n\ndef main(in_use=True, is_employee=False):\n print(\"WELCOME TO RETRO VIDEO STORE\")\n\n ee_id = input(\"Employee? Please enter your 4 digit id. Hit Enter to continue as a customer.\\n\")\n if len(ee_id) == 4 and ee_id.isdigit():\n print(f\"Welcome to work, Employee {ee_id}\")\n is_employee = True\n list_options_ee()\n\n while is_employee and in_use:\n func_call_dict = {\n \"1\" : add_video,\n \"2\" : edit_video,\n \"3\" : remove_video,\n \"4\" : view_video_stock,\n \"5\" : view_single_video,\n \"6\" : add_customer,\n \"7\" : edit_customer,\n \"8\" : delete_customer,\n \"9\" : view_customer,\n \"10\" : view_all_customers,\n \"11\" : checking_out,\n \"12\" : checking_in\n }\n\n choice = None\n while choice not in func_call_dict:\n choice = input(\"What would you like to do? Q to quit.\\n\")\n\n if choice == \"Q\" or choice == 'q':\n print(f\"Goodbye Retro Video Store Employee {ee_id}!\")\n bar_break()\n return\n \n func_call_dict[choice]()\n bar_break()\n \n while in_use:\n func_call_dict = {\n \"1\" : find_videos_by,\n \"2\" : check_current_rentals\n }\n\n choice = None\n while choice not in func_call_dict:\n choice = input(\"What would you like to do? 
Q to quit.\\n\")\n\n if choice == \"Q\" or choice == 'q':\n print(f\"Goodbye Retro Video Store Employee {ee_id}!\")\n bar_break()\n return\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.post", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 63, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 75, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 86, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 92, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 99, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 110, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 122, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 133, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 140, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 146, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 156, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 166, "usage_type": "name"}]} +{"seq_id": "558839874", "text": "import unittest\nimport mock\n\nfrom StringIO import StringIO\n\nfrom superlance.compat import xmlrpclib\nfrom superlance.oome_monitor import OomeMonitor, OomeProcess\nfrom superlance.tests.dummy import (DummyRPCServer,\n DummySupervisorRPCNamespace)\n\nclass TestOomeProcess(unittest.TestCase):\n \"\"\"\n Test class to test OomeProcess methods and properties\n \"\"\"\n @mock.patch('sys.stderr', new_callable=StringIO)\n def setUp(self, mock_stderr):\n \"\"\"\n Setup function to initialise tests\n \"\"\"\n 
self.stderr = mock_stderr\n process_object = DummySupervisorRPCNamespace.all_process_info[0]\n self.process = OomeProcess(process_object, oome_file='oome_file')\n \n def test_init(self):\n \"\"\"\n Tests if OomeProcess could be created\n \"\"\"\n self.assertTrue(isinstance(self.process, OomeProcess))\n \n def test_env_vars(self):\n \"\"\"\n Tests getting the env_vars property. Dummy env var resembles real\n environ file inside /proc/<pid>/\n \"\"\"\n dummy_env_var = (\"SUPERVISOR_GROUP_NAME=test_server\\x00SUPERVISOR_PROC\"\n \"ESS_NAME=test_server\\x00HOMEDIR=homedir\\x00SUPERVISOR_ENABLED=1\"\n \"\\x00SUPERVISOR_SERVER_URL=unix:///tmp/supervisor.sock\\x00OOME_FIL\"\n \"E=oome_file\")\n expected = {'OOME_FILE': 'oome_file', 'HOMEDIR': 'homedir'}\n with mock.patch('superlance.oome_monitor.open',\n mock.mock_open(read_data=dummy_env_var), create=True) as m:\n self.assertEqual(sorted(expected.items()),\n sorted(self.process.env_vars.items()))\n \n def test_get_oome_file_oome_file_init(self):\n \"\"\"\n Tests getting the oome_file name property if it was set during init\n \"\"\"\n self.assertEqual('oome_file', self.process.oome_file)\n \n def test_get_oome_file_oome_file_env(self):\n \"\"\"\n Tests getting the oome_file name property if $OOME_FILE is in env vars\n \"\"\"\n self.process._oome_file = None\n self.process._env_vars = {'OOME_FILE': 'oome_file_environ'}\n self.assertEqual('oome_file_environ', self.process.oome_file)\n \n def test_get_oome_file_homedir_env(self):\n \"\"\"\n Tests getting the oome_file name property if $HOMEDIR is in env vars\n \"\"\"\n self.process._oome_file = None\n self.process._env_vars = {'HOMEDIR': 'homedir'}\n self.assertEqual('homedir/work/oome', self.process.oome_file)\n \n @mock.patch('superlance.oome_monitor.os.readlink')\n def test_get_oome_file_cwd(self, mock_readlink):\n \"\"\"\n Tests getting the oome_file name property if no env variables were set\n \"\"\"\n mock_readlink.return_value = 'cwd'\n self.process._oome_file = None\n self.process._env_vars = {'USELESS_VAR': '3.141599'}\n self.assertEqual('cwd/work/oome', self.process.oome_file)\n \n def test_set_oome_file(self):\n \"\"\"\n Tests setting oome_file property\n \"\"\"\n self.process.oome_file = 'real_oome_file'\n self.assertEqual('real_oome_file', self.process.oome_file)\n \n\n @mock.patch('superlance.oome_monitor.os.path.isfile',\n return_value=True)\n def test_check_oome_file_exists(self, mock_os_path):\n \"\"\"\n Tests checking oome_file existence\n \"\"\"\n self.assertTrue(self.process.check_oome_file())\n \n @mock.patch('superlance.oome_monitor.os.path.isfile',\n return_value=False)\n def test_check_oome_file_does_not_exist(self, mock_os_path):\n \"\"\"\n Tests checking oome_file non existence\n \"\"\"\n self.assertFalse(self.process.check_oome_file())\n \n @mock.patch('superlance.oome_monitor.os.remove', return_value=True)\n def test_oome_file_delete(self, mock_os_remove):\n \"\"\"\n Tests deleting the oome file\n \"\"\"\n self.process.delete_oome_file()\n self.assertEqual(\"oome file oome_file was deleted\\n\",\n self.stderr.getvalue())\n \n @mock.patch('superlance.oome_monitor.os.remove',\n side_effect=OSError('file'))\n def test_oome_file_delete_failure(self, mock_os_remove):\n \"\"\"\n Tests failure to delete the oome file (the original method name shadowed\n the success test above, so that test never ran)\n \"\"\"\n self.process.delete_oome_file()\n self.assertEqual(\"oome file could not be removed: file\\n\",\n self.stderr.getvalue())\n \n\nclass TestOomeMonitor(unittest.TestCase):\n \"\"\"\n Test class to test OomeMonitor methods and properties\n \"\"\"\n 
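# setUp below patches stdin/stdout/stderr and the ExternalService hook via\n # the stacked mock.patch decorators, so every test runs against dummies.\n 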
@mock.patch('superlance.oome_monitor.ExternalService')\n @mock.patch('sys.stdin', new_callable=StringIO)\n @mock.patch('sys.stdout', new_callable=StringIO)\n @mock.patch('sys.stderr', new_callable=StringIO)\n def setUp(self, mock_stderr, mock_stdout, mock_stdin, mock_ext_service):\n \"\"\"\n Setup function to initialise tests\n \"\"\"\n rpc = DummyRPCServer()\n process_name = ['foo']\n self.stderr = mock_stderr\n self.stdout = mock_stdout\n self.stdin = mock_stdin\n self.oome_monitor_all = OomeMonitor(rpc, all=True)\n self.oome_monitor_single = OomeMonitor(rpc, process_name=process_name)\n dummy_supervisor = DummySupervisorRPCNamespace()\n self.oome_monitor_all.rpc.supervisor = dummy_supervisor\n ext_service = mock_ext_service('some script')\n self.oome_monitor_single_ext_svc = OomeMonitor(rpc,\n process_name=process_name, ext_service=ext_service)\n \n def test_init(self):\n \"\"\"\n Tests OomeMonitor object creation\n \"\"\"\n self.assertTrue(isinstance(self.oome_monitor_all, OomeMonitor))\n self.assertTrue(isinstance(self.oome_monitor_single, OomeMonitor))\n \n def test_generate_processes(self):\n \"\"\"\n Tests OomeMonitor _generate_processes method\n \"\"\"\n self.assertEqual(len(self.oome_monitor_all.processes),\n len(DummySupervisorRPCNamespace.all_process_info))\n self.assertEqual(len(self.oome_monitor_single.processes), 1)\n \n def test_write_stderr(self):\n \"\"\"\n Tests write_stderr\n \"\"\"\n self.oome_monitor_all.write_stderr('some message')\n self.assertEqual('some message\\n',\n self.stderr.getvalue())\n\n def test_procs(self):\n \"\"\"\n Tests OomeMonitor.procs property\n \"\"\"\n self.assertEqual(self.oome_monitor_all.procs,\n DummySupervisorRPCNamespace.all_process_info)\n # It should match to \"foo\" process defined in Dummy\n self.assertEqual(self.oome_monitor_single.procs,\n DummySupervisorRPCNamespace.all_process_info[:1])\n\n def test_restart(self):\n \"\"\"\n Tests OomeMonitor.restart method\n \"\"\"\n self.oome_monitor_all.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.oome_monitor_single.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.oome_monitor_single_ext_svc.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.assertEqual('foo restarted\\nfoo restarted\\nfoo restarted\\n',\n self.stderr.getvalue())\n \n def test_failed_restart(self):\n \"\"\"\n Tests OomeMonitor.restart method failure\n \"\"\"\n self.oome_monitor_all.rpc.supervisor.stopProcess = mock.MagicMock(\n side_effect=xmlrpclib.Fault('stop', 'error'))\n self.oome_monitor_all.rpc.supervisor.startProcess = mock.MagicMock(\n side_effect=xmlrpclib.Fault('start', 'error'))\n self.oome_monitor_all.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.assertEqual(\"Failed to stop process foo: \\n\"\n \"Failed to start process foo: \\n\",\n self.stderr.getvalue())\n \n @mock.patch('superlance.oome_monitor.os.readlink')\n def test_run(self, mock_readlink):\n \"\"\"\n Functional test for run() all method with one of the processes (bar) having\n an oome file. 
OomeMonitor will try to delete the mocked oome file\n and restart the process (using dummy rpc.supervisor)\n \"\"\"\n self.stdin.write('eventname:TICK len:0\\n')\n self.stdin.seek(0)\n # returning that the process has an oome file\n self.oome_monitor_all.processes[1].check_oome_file = mock.MagicMock()\n # mocking the actual file delete\n self.oome_monitor_all.processes[1].delete_oome_file = mock.MagicMock()\n with mock.patch('superlance.oome_monitor.open',\n mock.mock_open(read_data='test'), create=True) as m:\n self.oome_monitor_all.run(test=True)\n self.assertEqual(\"bar restarted\\n\", self.stderr.getvalue())\n \n @mock.patch('superlance.oome_monitor.os.readlink')\n def test_run_sigle(self, mock_readlink):\n \"\"\"\n Functional test for run() single method with the processes (foo) having\n an oome file. OomeMonitor will try to delete the mocked oome file\n and restart the process (using dummy rpc.supervisor)\n \"\"\"\n self.stdin.write('eventname:TICK len:0\\n')\n self.stdin.seek(0)\n # returning that the process has an oome file\n self.oome_monitor_single.processes[0].check_oome_file = \\\n mock.MagicMock()\n # mocking the actual file delete\n self.oome_monitor_single.processes[0].delete_oome_file = \\\n mock.MagicMock()\n with mock.patch('superlance.oome_monitor.open',\n mock.mock_open(read_data='test'), create=True) as m:\n self.oome_monitor_single.run(test=True)\n self.assertEqual(\"foo restarted\\n\", self.stderr.getvalue())\n \n @mock.patch('superlance.oome_monitor.os.readlink')\n def test_dry_run(self, mock_readlink):\n \"\"\"\n Functional test for run() method with one of the processes (bar) having\n an oome file. OomeMonitor will not try to delete the mocked oome file\n or restart the process due to dry run\n \"\"\"\n self.stdin.write('eventname:TICK len:0\\n')\n self.stdin.seek(0)\n self.oome_monitor_all.dry = True\n # returning that the process has an oome file\n self.oome_monitor_all.processes[1].check_oome_file = mock.MagicMock()\n # mocking the actual file delete\n self.oome_monitor_all.processes[1].delete_oome_file = mock.MagicMock()\n with mock.patch('superlance.oome_monitor.open',\n mock.mock_open(read_data='test'), create=True) as m:\n self.oome_monitor_all.run(test=True)\n self.assertEqual(\"oome file is detected for bar, not restarting due to\"\n \" dry-run\\n\", self.stderr.getvalue())\n \nif __name__ == '__main__':\n unittest.main()", "sub_path": "superlance/tests/oome_monitor_test.py", "file_name": "oome_monitor_test.py", "file_ext": "py", "file_size_in_byte": 10851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 21, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 21, "usage_type": "name"}, {"api_name": "superlance.oome_monitor.OomeProcess", "line_number": 22, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 15, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 15, "usage_type": "name"}, {"api_name": "superlance.oome_monitor.OomeProcess", "line_number": 28, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 40, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 41, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 67, "usage_type": "call"}, {"api_name": "mock.patch", 
"line_number": 85, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 93, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 101, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 110, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 121, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummyRPCServer", "line_number": 133, "usage_type": "call"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 138, "usage_type": "call"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 139, "usage_type": "call"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 140, "usage_type": "call"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 143, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 125, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 126, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 126, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 127, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 127, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 128, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 128, "usage_type": "name"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 150, "usage_type": "argument"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 151, "usage_type": "argument"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 158, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 158, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 174, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 174, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 177, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 177, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 184, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 184, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 186, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 186, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 188, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 188, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 196, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib.Fault", "line_number": 197, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib", "line_number": 197, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 198, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib.Fault", "line_number": 199, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib", "line_number": 199, "usage_type": "name"}, {"api_name": 
"superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 201, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 201, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 216, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 218, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 219, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 220, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 206, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 235, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 238, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 239, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 240, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 224, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 255, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 257, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 258, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 259, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 244, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "170910237", "text": "from sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nimport pandas as pd\nfrom sklearn.utils.testing import all_estimators\n\n# scikit-learn 0.20.3 에서 31개\n# scikit-learn 0.21.2 에서 40개중 4개만 돔.\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\niris_data = pd.read_csv(\"./keras/ml/Data/iris2.csv\", encoding= 'utf-8' )\n\n# 붓꽃 데이이터 레이블과 입력 데이터로 분리하기\ny = iris_data.loc[:, \"Name\"]\nx = iris_data.loc[:,[ \"SepalLength\",\"SepalWidth\",\"PetalLength\",\"PetalWidth\"]]\n\n# 학습 전용과 테스트 전용 분리하기\nwarnings.filterwarnings('ignore')\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n# 그리드 서치에서 사용 할 매개 변수 -- (*1)\n\nparameters = [\n {\"n_estimators\": [1,10,100,1000], \"min_samples_split\":[2,3,4,5]},\n {\"n_estimators\": [1,10,100,1000], \"min_samples_split\":[2,3,4,5], \"min_samples_leaf\":[1,2,3,4]},\n {\"n_estimators\": [1,10,100,1000], \"min_samples_split\":[2,3,4,5], \"bootstrap\": [\"True\", \"False\"]}\n]\n\n# 그리드 서치 --- (*2)\nkfold_cv = KFold(n_splits= 5, shuffle=True)\nmodel = GridSearchCV( RandomForestClassifier(), parameters, cv=kfold_cv)\nmodel.fit(x_train, y_train)\nprint(\"/n-------------------\")\nprint(\" 최적의 매개 변수 = \", model.best_estimator_)\n\n# 최적의 매개 변수로 평가하기 ---(*3)\ny_pred = model.predict(x_test)\nprint(\"/n-------------------\")\nprint(\"최종 정답률 = \", accuracy_score(y_test, y_pred))", "sub_path": "ml/m10_gridSearch2_rf.py", "file_name": "m10_gridSearch2_rf.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, 
"usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "295743145", "text": "import tests.aswwu.behaviors.elections.election.election_subtests as election_subtests\nimport tests.aswwu.behaviors.elections.position.position_requests as position_requests\nimport tests.aswwu.behaviors.elections.vote.vote_requests as vote_requests\nimport tests.aswwu.behaviors.elections.vote.vote_subtests as vote_subtests\nimport tests.aswwu.behaviors.auth.auth_subtests as auth_subtests\nimport tests.aswwu.data.paths as paths\nimport tests.utils as utils\nimport json\nfrom tests.conftest import testing_server\n\nPOSITION_DATA = {\n 'position': 'Senator',\n 'election_type': 'aswwu',\n 'active': 'True',\n 'order': 1\n}\n\n\ndef test_post_vote(testing_server):\n admin_session = election_subtests.create_elections_admin()\n election_id = election_subtests.assert_post_dynamic_election(admin_session)['id']\n position_resp = position_requests.post_position(admin_session, POSITION_DATA['position'],\n POSITION_DATA['election_type'],\n POSITION_DATA['active'], POSITION_DATA['order'])\n position_id = json.loads(position_resp.text)['id']\n vote_subtests.create_votes(admin_session, election_id, position_id)\n\n\ndef test_post_vote_candidates(testing_server):\n pass\n\n\ndef test_get_vote(testing_server):\n admin_session = election_subtests.create_elections_admin()\n election_id = election_subtests.assert_post_dynamic_election(admin_session)['id']\n position_resp = position_requests.post_position(admin_session, POSITION_DATA['position'],\n POSITION_DATA['election_type'],\n POSITION_DATA['active'], POSITION_DATA['order'])\n position_id = json.loads(position_resp.text)['id']\n vote_data = vote_subtests.create_votes(admin_session, election_id, position_id)\n users = utils.load_csv(paths.USERS_PATH)\n\n for count, user in enumerate(users):\n user_session = auth_subtests.assert_verify_login(user)[1]\n resp = vote_requests.get_vote(user_session, position_id, user['username'])\n assert (resp.status_code == 200)\n resp_text = json.loads(resp.text)['votes']\n for vote in resp_text:\n vote_subtests.assert_vote_data(vote, vote_data[user['username']])\n", "sub_path": "tests/aswwu/behaviors/elections/vote/test_vote.py", "file_name": "test_vote.py", "file_ext": "py", "file_size_in_byte": 2317, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.create_elections_admin", "line_number": 20, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 20, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.assert_post_dynamic_election", "line_number": 21, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 21, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests.post_position", "line_number": 22, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests", "line_number": 22, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 
25, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests.create_votes", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.create_elections_admin", "line_number": 34, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 34, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.assert_post_dynamic_election", "line_number": 35, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 35, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests.post_position", "line_number": 36, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests", "line_number": 36, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests.create_votes", "line_number": 40, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests", "line_number": 40, "usage_type": "name"}, {"api_name": "tests.utils.load_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "tests.utils", "line_number": 41, "usage_type": "name"}, {"api_name": "tests.aswwu.data.paths.USERS_PATH", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tests.aswwu.data.paths", "line_number": 41, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.auth.auth_subtests.assert_verify_login", "line_number": 44, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.auth.auth_subtests", "line_number": 44, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_requests.get_vote", "line_number": 45, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_requests", "line_number": 45, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests.assert_vote_data", "line_number": 49, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "385379372", "text": "import enum\n\n\nclass Action(enum.Enum):\n new = 0 # process have not arrive at CPU\n ready = 1 # ready to use CPU\n burst = 2 # actively using CPU\n block = 3 # I/O time\n ternimated = 4 # process terminates\n \n enter_CPU = 5\n leave_CPU = 6\n preempted = 7\n\n\nclass Process:\n def __init__(self, name: str):\n self.name = name\n self.arrival_time = 0 # process arrival time, in MILLISECONDS\n\n self.burst_time = [] # CPU burst time in MS\n self.block_time = [] # I/O block time in MS\n self.index = 0\n self.remain = 0\n\n # process current status\n self.action = Action.new\n # time of the process finish current status in MILLISECONDS. 
+{"seq_id": "385379372", "text": "import enum\n\n\nclass Action(enum.Enum):\n    new = 0  # process has not arrived at the CPU yet\n    ready = 1  # ready to use the CPU\n    burst = 2  # actively using the CPU\n    block = 3  # blocked on I/O\n    terminated = 4  # process terminates\n    \n    enter_CPU = 5\n    leave_CPU = 6\n    preempted = 7\n\n\nclass Process:\n    def __init__(self, name: str):\n        self.name = name\n        self.arrival_time = 0  # process arrival time, in MILLISECONDS\n\n        self.burst_time = []  # CPU burst times in MS\n        self.block_time = []  # I/O block times in MS\n        self.index = 0\n        self.remain = 0\n\n        # process current status\n        self.action = Action.new\n        # time at which the process finishes its current status, in MILLISECONDS.\n        # If a process enters the CPU at x ms and takes a y ms CPU burst,\n        # action_leave will be x + y\n        self.action_enter = 0\n        self.action_leave = 0\n\n        self.wait_time = 0\n        self.preempt_count = 0\n        self.switch_count = 0\n        self.tau = 0\n\n        # use setattr(object, name, value) to add attributes as needed\n\n\n\"\"\"\nLinear congruential generator for random numbers.\nThe algorithm follows the POSIX srand48/drand48 specification.\n\"\"\"\n\n\nclass LCG:\n    def __init__(self):\n        self.seed = 0\n\n    # initialize the seed; for implementation details see man srand48\n    def srand48(self, seedval: int):\n        self.seed = ((seedval & 0xFFFFFFFF) << 16) | 0x330E\n\n    # get the next random number; for implementation details see man drand48\n    def drand48(self) -> float:\n        self.seed = (0x5DEECE66D * self.seed + 0xB) & 0xffffffffffff\n        return float(self.seed / 0x1000000000000)", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "attribute"}]} +{"seq_id": "578582790", "text": "from operator import itemgetter\r\nimport os\r\nimport tkinter as tk\r\nimport ttk\r\nimport model\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy import func\r\n\r\nsession = sessionmaker(bind=model.db)\r\nsession = session()\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Rapport\")\r\n\r\ncontainer = ttk.Frame(root)\r\ncontainer.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))\r\ninfo_message = tk.StringVar()\r\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\r\n\r\ndef aantalreis():\r\n    \"\"\"\r\n    Builds a report of the number of trips per traveler and saves it to a text file.\r\n    \"\"\"\r\n    file = open(\"_aantal reizen per ov-chipkaart.txt\", 'w')\r\n    ovgebruikers = session.query(model.Reis, func.count(model.Reis.ov_id)).group_by(model.Reis.ov_id)\r\n    ovgebruikers = sorted(ovgebruikers, key=itemgetter(1), reverse=True)\r\n    for ovgebruiker in ovgebruikers:\r\n        ovgebruiker_name = session.query(model.Reis).filter_by(ov_id=ovgebruiker[0].ov_id).first().ov_id\r\n        file.write(\"OV-chipkaart: {ovgebruiker} Totaal aantal reizen: {aantal_reizen}\\n\".format(ovgebruiker=ovgebruiker_name, aantal_reizen=ovgebruiker[1]))\r\n    file.close()\r\n    info_message.set(\"Het bestand is opgeslagen op:\\n{dir}\\\\_aantal reizen per ov-chipkaart.txt\".format(dir=current_dir))\r\n\r\ndef populairbesteming():\r\n    \"\"\"\r\n    Builds a report of the most popular destinations and saves it to a text file.\r\n    \"\"\"\r\n    file = open(\"_populairste-bestemmingen.txt\", 'w')\r\n    stations = session.query(model.Reis, func.count(model.Reis.eindstation_id)).group_by(model.Reis.eindstation_id).all()\r\n    stations = sorted(stations, key=itemgetter(1), reverse=True)\r\n    for station in stations:\r\n        station_name = session.query(model.Station).filter_by(station_id=station[0].eindstation_id).first().station_naam\r\n        file.write(\"Station {station} is {aantal_bezocht} keer de bestemming geweest van een OV-gebruiker.\\n\".format(station=station_name, aantal_bezocht=station[1]))\r\n    file.close()\r\n    info_message.set(\"Het bestand is opgeslagen op:\\n{dir}\\\\_populairste-bestemmingen.txt\".format(dir=current_dir))\r\n\r\n\r\ndef populairvertrek():\r\n    \"\"\"\r\n    Builds a report of the most popular departure stations and saves it to a text file.\r\n    \"\"\"\r\n    file = open(\"_populairste-vertrekstations.txt\", 'w')\r\n    stations = session.query(model.Reis, 
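A quick usage sketch for the LCG class in the util.py record above (the report.py record resumes below). Identical seeds must reproduce the identical sequence, and the 48-bit state keeps every draw in [0.0, 1.0):

# seed two independent generators identically and compare their streams
gen_a, gen_b = LCG(), LCG()
gen_a.srand48(42)
gen_b.srand48(42)
seq_a = [gen_a.drand48() for _ in range(5)]
seq_b = [gen_b.drand48() for _ in range(5)]
assert seq_a == seq_b                         # deterministic given the seed
assert all(0.0 <= x < 1.0 for x in seq_a)     # draws always fall in [0, 1)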
func.count(model.Reis.beginstation_id)).group_by(model.Reis.beginstation_id).all()\n stations = sorted(stations, key=itemgetter(1), reverse=True)\n for station in stations:\n station_name = session.query(model.Station).filter_by(station_id=station[0].beginstation_id).first().station_naam\n file.write(\"Vanaf station {station} is {aantal_bezocht} keer een OV-gebruiker vertrokken.\\n\".format(station=station_name, aantal_bezocht=station[1]))\n file.close()\n info_message.set(\"Het bestand is opgeslagen op:\\n{dir}\\\\_populairste-vertrekstations.txt\".format(dir=current_dir))\n\n\ntk.Label(container, text=\"Raport -Kies een van de opties\", anchor=\"center\", font=\"-size 10 -weight bold\").grid(column=0, row=0, columnspan=3, sticky=(tk.W, tk.E))\ntk.Label(container, textvariable=info_message, wraplength=500).grid(column=0, columnspan=3, row=1, sticky=(tk.W, tk.E))\ntk.Button(container, text=\"De populairste bestemmingen\", command=populairbesteming).grid(column=0, row=2, sticky=tk.W)\ntk.Button(container, text=\"De populairste vertrekstations\", command=populairvertrek).grid(column=1, row=2, sticky=tk.W)\ntk.Button(container, text=\"Het aantal reizen per ov­chipkaart\", command=aantalreis).grid(column=2, row=2, sticky=tk.W)\n\n\nroot.mainloop()\n", "sub_path": "report.py", "file_name": "report.py", "file_ext": "py", "file_size_in_byte": 3627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 9, "usage_type": "call"}, {"api_name": "model.db", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 12, "usage_type": "call"}, {"api_name": "ttk.Frame", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.N", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.S", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 18, "usage_type": "call"}, {"api_name": "model.Reis", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.count", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 25, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 26, "usage_type": "call"}, {"api_name": "model.Reis", "line_number": 28, "usage_type": "attribute"}, {"api_name": "model.Reis", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.count", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 38, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 39, "usage_type": "call"}, {"api_name": "model.Station", "line_number": 41, "usage_type": "attribute"}, {"api_name": "model.Reis", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.count", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 52, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 53, "usage_type": "call"}, {"api_name": "model.Station", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 61, 
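The report functions above aggregate with query(..., func.count(...)).group_by(...) and then re-sort the rows in Python via sorted(..., key=itemgetter(1), reverse=True). The ordering can also be pushed into the database; a sketch, where Trip is a hypothetical mapped class standing in for model.Reis (the record's API annotations resume below):

from sqlalchemy import desc, func

# count trips per destination and let SQL do the descending sort
counts = (
    session.query(Trip.end_station_id, func.count(Trip.trip_id).label("n"))
    .group_by(Trip.end_station_id)
    .order_by(desc("n"))
    .all()
)
for station_id, n in counts:
    print(station_id, n)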
"usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 65, "usage_type": "attribute"}]} +{"seq_id": "16446545", "text": "''' This module provides functions for embedding Bokeh plots in various\ndifferent ways.\n\nThere are a number of different combinations of options when embedding\nBokeh plots. The data for the plot can be contained in the document,\nor on a Bokeh server, or in a sidecar JavaScript file. Likewise, BokehJS\nmay be inlined in the document, or loaded from CDN or a Bokeh server.\n\nThe functions in ``bokeh.embed`` provide functionality to embed in all\nthese different cases.\n\n'''\n\nimport uuid\n\nfrom .protocol import serialize_json\nfrom .resources import Resources\nfrom .templates import (\n AUTOLOAD, AUTOLOAD_SERVER, AUTOLOAD_STATIC, FILE,\n NOTEBOOK_DIV, PLOT_DIV, PLOT_JS, PLOT_SCRIPT, RESOURCES\n)\nfrom .utils import encode_utf8\n\ndef components(plot_object, resources):\n ''' Return HTML components to embed a Bokeh plot.\n\n The data for the plot is stored directly in the returned HTML.\n\n .. note:: The returned components assume that BokehJS resources\n are **already loaded**.\n\n Args:\n plot_object (PlotObject) : Bokeh object to render\n typically a Plot or PlotContext\n resources (Resources, optional) : BokehJS resources config\n\n Returns:\n (script, div) : UTF-8 encoded\n\n '''\n ref = plot_object.ref\n elementid = str(uuid.uuid4())\n\n js = PLOT_JS.render(\n elementid = elementid,\n modelid = ref[\"id\"],\n modeltype = ref[\"type\"],\n all_models = serialize_json(plot_object.dump()),\n )\n script = PLOT_SCRIPT.render(\n plot_js = resources.js_wrapper(js),\n )\n div = PLOT_DIV.render(elementid=elementid)\n\n return encode_utf8(script), encode_utf8(div)\n\n\ndef notebook_div(plot_object):\n ''' Return HTML for a div that will display a Bokeh plot in an\n IPython Notebook\n\n The data for the plot is stored directly in the returned HTML.\n\n Args:\n plot_object (PlotObject) : Bokeh object to render\n typically a Plot or PlotContext\n\n Returns:\n div : UTF-8 encoded HTML text\n\n .. 
note:: Assumes ``bokeh.load_notebook()`` or the equivalent has\n already been executed.\n\n '''\n ref = plot_object.ref\n resources = Resources()\n elementid = str(uuid.uuid4())\n\n js = PLOT_JS.render(\n elementid = elementid,\n modelid = ref[\"id\"],\n modeltype = ref[\"type\"],\n all_models = serialize_json(plot_object.dump()),\n )\n script = PLOT_SCRIPT.render(\n plot_js = resources.js_wrapper(js),\n )\n div = PLOT_DIV.render(elementid=elementid)\n html = NOTEBOOK_DIV.render(\n plot_script = script,\n plot_div = div,\n )\n return encode_utf8(html)\n\n\ndef file_html(plot_object, resources, title, template=FILE):\n ''' Return an HTML document that embeds a Bokeh plot.\n\n The data for the plot is stored directly in the returned HTML.\n\n Args:\n plot_object (PlotObject) : Bokeh object to render\n typically a Plot or PlotContext\n resources (Resources) : a resource configuration for BokehJS assets\n title (str) : a title for the HTML document ```` tags\n template (Template, optional) : HTML document template (default: FILE)\n A Jinja2 Template, see bokeh.templates.FILE for the required\n template parameters\n\n Returns:\n html : standalone HTML document with embedded plot\n\n '''\n plot_resources = RESOURCES.render(\n js_raw = resources.js_raw,\n css_raw = resources.css_raw,\n js_files = resources.js_files,\n css_files = resources.css_files,\n )\n script, div = components(plot_object, resources)\n html = template.render(\n title = title,\n plot_resources = plot_resources,\n plot_script = script,\n plot_div = div,\n )\n return encode_utf8(html)\n\n\ndef autoload_static(plot_object, resources, script_path):\n ''' Return JavaScript code and a script tag that can be used to embed\n Bokeh Plots.\n\n The data for the plot is stored directly in the returned JavaScript code.\n\n Args:\n plot_object (PlotObject) :\n resources (Resources) :\n script_path (str) :\n\n Returns:\n (js, tag) :\n JavaScript code to be saved at ``script_path`` and a ``<script>``\n tag to load it\n\n Raises:\n ValueError\n\n '''\n if resources.mode == 'inline':\n raise ValueError(\"autoload_static() requires non-inline resources\")\n\n if resources.dev:\n raise ValueError(\"autoload_static() only works with non-dev resources\")\n\n elementid = str(uuid.uuid4())\n\n js = AUTOLOAD.render(\n all_models = serialize_json(plot_object.dump()),\n js_url = resources.js_files[0],\n css_files = resources.css_files,\n elementid = elementid,\n )\n\n tag = AUTOLOAD_STATIC.render(\n src_path = script_path,\n elementid = elementid,\n modelid = plot_object._id,\n modeltype = plot_object.__view_model__,\n loglevel = resources.log_level,\n )\n\n return encode_utf8(js), encode_utf8(tag)\n\n\ndef autoload_server(plot_object, session):\n ''' Return a script tag that can be used to embed Bokeh Plots from\n a Bokeh Server.\n\n The data for the plot is stored on the Bokeh Server.\n\n Args:\n plot_object (PlotObject) :\n session (session) :\n\n Returns:\n tag :\n a ``<script>`` tag that will execute an autoload script\n loaded from the Bokeh Server\n\n '''\n elementid = str(uuid.uuid4())\n resources = Resources(root_url=session.root_url, mode=\"server\")\n tag = AUTOLOAD_SERVER.render(\n src_path = resources._autoload_path(elementid),\n elementid = elementid,\n modelid = plot_object._id,\n root_url = resources.root_url,\n docid = session.docid,\n docapikey = session.apikey,\n loglevel = resources.log_level,\n )\n\n return encode_utf8(tag)\n", "sub_path": "bokeh/embed.py", "file_name": "embed.py", "file_ext": "py", "file_size_in_byte": 5891, 
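The embedding helpers in the bokeh record above all follow one pattern: mint a fresh element id with uuid, serialize the plot object to JSON, and render both into an HTML/JS template. A generic sketch of that pattern with jinja2; the two templates here are illustrative stand-ins, not bokeh's actual PLOT_JS and PLOT_DIV (the record's metadata resumes below):

import json
import uuid

from jinja2 import Template

DIV = Template('<div class="plotdiv" id="{{ elementid }}"></div>')
JS = Template('render_plot({{ all_models }}, "{{ elementid }}", "{{ modelid }}");')

def embed_components(model_id, model_json):
    # a unique id ties the generated script to the div it should fill
    elementid = str(uuid.uuid4())
    script = JS.render(all_models=json.dumps(model_json),
                       elementid=elementid, modelid=model_id)
    div = DIV.render(elementid=elementid)
    return script, div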
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 42, "usage_type": "call"}, {"api_name": "templates.PLOT_JS.render", "line_number": 44, "usage_type": "call"}, {"api_name": "templates.PLOT_JS", "line_number": 44, "usage_type": "name"}, {"api_name": "protocol.serialize_json", "line_number": 48, "usage_type": "call"}, {"api_name": "templates.PLOT_SCRIPT.render", "line_number": 50, "usage_type": "call"}, {"api_name": "templates.PLOT_SCRIPT", "line_number": 50, "usage_type": "name"}, {"api_name": "resources.js_wrapper", "line_number": 51, "usage_type": "call"}, {"api_name": "templates.PLOT_DIV.render", "line_number": 53, "usage_type": "call"}, {"api_name": "templates.PLOT_DIV", "line_number": 53, "usage_type": "name"}, {"api_name": "utils.encode_utf8", "line_number": 55, "usage_type": "call"}, {"api_name": "resources.Resources", "line_number": 76, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 77, "usage_type": "call"}, {"api_name": "templates.PLOT_JS.render", "line_number": 79, "usage_type": "call"}, {"api_name": "templates.PLOT_JS", "line_number": 79, "usage_type": "name"}, {"api_name": "protocol.serialize_json", "line_number": 83, "usage_type": "call"}, {"api_name": "templates.PLOT_SCRIPT.render", "line_number": 85, "usage_type": "call"}, {"api_name": "templates.PLOT_SCRIPT", "line_number": 85, "usage_type": "name"}, {"api_name": "resources.js_wrapper", "line_number": 86, "usage_type": "call"}, {"api_name": "templates.PLOT_DIV.render", "line_number": 88, "usage_type": "call"}, {"api_name": "templates.PLOT_DIV", "line_number": 88, "usage_type": "name"}, {"api_name": "templates.NOTEBOOK_DIV.render", "line_number": 89, "usage_type": "call"}, {"api_name": "templates.NOTEBOOK_DIV", "line_number": 89, "usage_type": "name"}, {"api_name": "utils.encode_utf8", "line_number": 93, "usage_type": "call"}, {"api_name": "templates.FILE", "line_number": 96, "usage_type": "name"}, {"api_name": "templates.RESOURCES.render", "line_number": 114, "usage_type": "call"}, {"api_name": "templates.RESOURCES", "line_number": 114, "usage_type": "name"}, {"api_name": "resources.js_raw", "line_number": 115, "usage_type": "attribute"}, {"api_name": "resources.css_raw", "line_number": 116, "usage_type": "attribute"}, {"api_name": "resources.js_files", "line_number": 117, "usage_type": "attribute"}, {"api_name": "resources.css_files", "line_number": 118, "usage_type": "attribute"}, {"api_name": "utils.encode_utf8", "line_number": 127, "usage_type": "call"}, {"api_name": "resources.mode", "line_number": 150, "usage_type": "attribute"}, {"api_name": "resources.dev", "line_number": 153, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 156, "usage_type": "call"}, {"api_name": "templates.AUTOLOAD.render", "line_number": 158, "usage_type": "call"}, {"api_name": "templates.AUTOLOAD", "line_number": 158, "usage_type": "name"}, {"api_name": "protocol.serialize_json", "line_number": 159, "usage_type": "call"}, {"api_name": "resources.js_files", "line_number": 160, "usage_type": "attribute"}, {"api_name": "resources.css_files", "line_number": 161, "usage_type": "attribute"}, {"api_name": "templates.AUTOLOAD_STATIC.render", "line_number": 165, "usage_type": "call"}, {"api_name": "templates.AUTOLOAD_STATIC", "line_number": 165, "usage_type": "name"}, {"api_name": "resources.log_level", "line_number": 170, "usage_type": "attribute"}, {"api_name": "utils.encode_utf8", "line_number": 173, 
"usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 192, "usage_type": "call"}, {"api_name": "resources.Resources", "line_number": 193, "usage_type": "call"}, {"api_name": "templates.AUTOLOAD_SERVER.render", "line_number": 194, "usage_type": "call"}, {"api_name": "templates.AUTOLOAD_SERVER", "line_number": 194, "usage_type": "name"}, {"api_name": "resources._autoload_path", "line_number": 195, "usage_type": "call"}, {"api_name": "resources.root_url", "line_number": 198, "usage_type": "attribute"}, {"api_name": "resources.log_level", "line_number": 201, "usage_type": "attribute"}, {"api_name": "utils.encode_utf8", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "77862461", "text": "from django.urls import path, reverse\nfrom . import views\n\napp_name = 'mysite'\n\n#url = reverse('page-detail', kwargs={'file_pk': 1, 'page_pk':1})\n\nurlpatterns = [\n path('', views.FileListView.as_view(), name='file_list'),\n path('<str:pk>', views.FilePageListView.as_view(), name='page_list'),\n path('detail/<int:pk>', views.PageDetailView.as_view(), name='page_detail'),\n]", "sub_path": "mysite/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "247838591", "text": "import random, pygame, sys\nfrom pygame.locals import *\n\nFPS = 30\nWINDOWWIDTH = 640\nWINDOWHEIGHT = 480\nREVEALSPEED = 8\nBOXSIZE = 40\nGAPSIZE = 10\nBOARDWIDTH = 10\nBOARDHEIGHT = 7\nassert(BOARDWIDTH * BOARDHEIGHT) % 2 == 0, 'Board must contain even number of boxes'\nXMARGIN = int((WINDOWWIDTH - (BOARDWIDTH * (BOXSIZE +GAPSIZE))))\nYMARGIN = int((WINDOWHEIGHT - (BOARDHEIGHT * ( BOXSIZE + GAPSIZE))))\n\n#COLOURS\nGRAY = (100, 100, 100)\nNAVY_BLUE = (60, 60, 100)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nCYAN = (0, 255, 255)\nPURPLE = (255, 0, 255)\nORANGE = (255, 128, 0)\n\nBGCOLOR = WHITE\nLIGHTBGCOLOR = GRAY\nBOXCOLOR = GRAY\nHIGHLIGHTCOLOR = BLUE\n\nDONUT = 'donut'\nSQUARE = 'square'\nDIAMOND = 'diamond'\nLINES = 'lines'\nOVAL = 'oval'\n\nALLCOLORS = (RED, GREEN, BLUE, YELLOW, CYAN, PURPLE, ORANGE)\nALLSHAPES = (DONUT, SQUARE, DIAMOND, LINES, OVAL)\nassert len(ALLCOLORS) * len(ALLSHAPES) * 2 >= BOARDWIDTH * BOARDHEIGHT, \"Board is too big for the no of the shapes/ colour defined\"\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n mousex = 0\n mousey = 0\n pygame.display.set_caption('Memory Game')\n\n main_board = get_randomized_board()\n revealed_boxes = generate_revealed_boxes(False)\n\n first_selection = None\n\n DISPLAYSURF.fill(BGCOLOR)\n start_game_animation(main_board)\n\n while True:\n mouse_clicked = False\n\n DISPLAYSURF.fill(BGCOLOR)\n draw_board(main_board, revealed_boxes)\n\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == KEYUP and event.type == K_ESCAPE):\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n mousex, mousey = event.pos\n elif event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n mouse_clicked = True\n\n boxx, boxy = get_box_at_pixel(mousex, mousey)\n if boxx != None and boxy != None:\n if not 
revealed_boxes[boxx][boxy]:\n draw_highlight_box(boxx, boxy)\n if not revealed_boxes[boxx][boxy] and mouse_clicked:\n reveal_boxes_animation(main_board, [(boxx, boxy)])\n revealed_boxes[boxx][boxy] = True\n if first_selection == None:\n first_selection = [boxx, boxy]\n else:\n icon1_shape, icon1_color = get_shape_and_color(main_board, first_selection[0], first_selection[1])\n icon2_shape, icon2_color = get_shape_and_color(main_board, boxx, boxy)\n\n if icon1_shape != icon2_shape or icon1_color != icon2_color:\n pygame.time.wait(1000)\n cover_boxes_animation(main_board, [(first_selection[0], first_selection[1]), (boxx, boxy)])\n revealed_boxes[first_selection[0]][first_selection[1]] = False\n revealed_boxes[boxx][boxy] = False\n elif has_won(revealed_boxes):\n game_won_animation(main_board)\n pygame.time.wait(2000)\n main_board = get_randomized_board()\n revealed_boxes = generate_revealed_boxes(False)\n\n draw_board(main_board, revealed_boxes)\n pygame.display.update()\n pygame.time.wait(500)\n\n start_game_animation(main_board)\n first_selection = None\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\ndef has_won(revealed_boxes):\n for i in revealed_boxes:\n if False in i:\n return False\n return True\n\ndef game_won_animation(board):\n covered_boxes = generate_revealed_boxes(True)\n color1 = LIGHTBGCOLOR\n color2 = BGCOLOR\n for i in range(13):\n color1, color2 = color2, color1\n DISPLAYSURF.fill(color1)\n draw_board(board, covered_boxes)\n pygame.display.update()\n pygame.time.wait(300)\n\ndef draw_highlight_box(boxx, boxy):\n left, top = left_top_coords(boxx, boxy)\n pygame.draw.rect(DISPLAYSURF, HIGHLIGHTCOLOR, (left - 5, top - 5, BOXSIZE + 10, BOXSIZE + 10), 4)\n\ndef get_box_at_pixel(mousex, mousey):\n for x in range(BOARDWIDTH):\n for y in range(BOARDHEIGHT):\n left, top = left_top_coords(x,y)\n box_rect = pygame.Rect(left, top, BOXSIZE, BOXSIZE)\n if box_rect.collidepoint(mousex, mousey):\n return (x,y)\n return (None, None)\n\ndef start_game_animation(board):\n covered_boxes = generate_revealed_boxes(False)\n boxes = []\n for x in range(BOARDWIDTH):\n for y in range(BOARDHEIGHT):\n boxes.append((x,y))\n\n random.shuffle(boxes)\n box_groups = split_into_groups_of(8, boxes)\n\n draw_board(board, covered_boxes)\n for box in box_groups:\n reveal_boxes_animation(board, box)\n cover_boxes_animation(board, box)\n\ndef draw_board(board, revealed):\n for boxx in range(BOARDWIDTH):\n for boxy in range(BOARDHEIGHT):\n left, top = left_top_coords(boxx, boxy)\n if not revealed[boxx][boxy]:\n pygame.draw.rect(DISPLAYSURF, BOXCOLOR, (left, top, BOXSIZE, BOXSIZE))\n else:\n shape, color = get_shape_and_color(board, boxx, boxy)\n draw_icon(shape, color, boxx, boxy)\n\ndef draw_icon(shape, color, boxx, boxy):\n quarter = int (BOXSIZE * 0.25)\n half = int(BOXSIZE * 0.5)\n\n left, top = left_top_coords(boxx, boxy)\n if shape == DONUT:\n pygame.draw.circle(DISPLAYSURF, color, (left + half, top+half), half-5)\n pygame.draw.circle(DISPLAYSURF, color, (left + half, top+half), quarter-5)\n elif shape == SQUARE:\n pygame.draw.rect(DISPLAYSURF, color, (left+quarter, top+quarter, BOXSIZE-half, BOXSIZE-half))\n elif shape == DIAMOND:\n pygame.draw.polygon(DISPLAYSURF, color, ((left + half , top), (left + BOXSIZE - 1, top + half),\n (left + half, top + BOXSIZE -1), (left, top + half)))\n elif shape == LINES:\n for i in range(0, BOXSIZE, 4):\n pygame.draw.line(DISPLAYSURF, color, (left, top + i), (left + i, top))\n pygame.draw.line(DISPLAYSURF, color, (left + i, top + BOXSIZE -1), (left + BOXSIZE -1, top + 
i))\n elif shape == OVAL:\n pygame.draw.ellipse(DISPLAYSURF, color, (left, top + quarter, BOXSIZE, half))\n\ndef split_into_groups_of(size, box_list):\n result = []\n for i in range(0, len(box_list), size):\n result.append(box_list[i:i+size])\n return result\n\ndef cover_boxes_animation(board, box):\n for x in range(0, BOXSIZE + REVEALSPEED, REVEALSPEED):\n draw_box_covers(board, box, x)\n\ndef reveal_boxes_animation(board, box):\n for x in range(BOXSIZE, (-REVEALSPEED) -1, -REVEALSPEED):\n draw_box_covers(board, box, x)\n\ndef draw_box_covers(board, boxes, x):\n for box in boxes:\n left, top = left_top_coords(box[0], box[1])\n pygame.draw.rect(DISPLAYSURF, BGCOLOR, (left, top, BOXSIZE, BOXSIZE))\n shape, color = get_shape_and_color(board, box[0], box[1])\n draw_icon(shape, color, box[0], box[1])\n if x > 0:\n pygame.draw.rect(DISPLAYSURF, BOXCOLOR, (left, top, x, BOXSIZE))\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\ndef left_top_coords(boxx, boxy):\n left = boxx * (BOXSIZE + GAPSIZE) + XMARGIN\n top = boxy * (BOXSIZE + GAPSIZE) + YMARGIN\n return left, top\n\ndef get_shape_and_color(board, boxx, boxy):\n return board[boxx][boxy][0], board[boxx][boxy][1]\n\ndef generate_revealed_boxes(val):\n revealed_boxes = []\n for i in range(BOARDWIDTH):\n revealed_boxes.append([val]*BOARDHEIGHT)\n return revealed_boxes\n\ndef get_randomized_board():\n icons = []\n for colours in ALLCOLORS:\n for shape in ALLSHAPES:\n icons.append((shape, colours))\n\n random.shuffle(icons)\n no_icons_used = int (BOARDWIDTH * BOARDHEIGHT / 2)\n icons = icons[:no_icons_used] * 2\n random.shuffle(icons)\n\n board = []\n for x in range(BOARDWIDTH):\n col = []\n for y in range(BOARDHEIGHT):\n col.append(icons[0])\n del icons[0]\n board.append(col)\n return board\n\nif __name__ == '__main__':\n main()\n", "sub_path": "memory_game.py", "file_name": "memory_game.py", "file_ext": "py", "file_size_in_byte": 8385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.time.wait", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 106, "usage_type": "call"}, 
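The memory game above converts between board and pixel coordinates in two places (left_top_coords and get_box_at_pixel). A standalone sketch of that mapping and its inverse, using the record's BOXSIZE and GAPSIZE and the margins its formulas evaluate to (the record's API annotations resume below):

BOXSIZE, GAPSIZE = 40, 10
XMARGIN, YMARGIN = 140, 130   # 640 - 10*(40+10) and 480 - 7*(40+10)

def to_pixel(boxx, boxy):
    # top-left pixel of a board cell
    return (boxx * (BOXSIZE + GAPSIZE) + XMARGIN,
            boxy * (BOXSIZE + GAPSIZE) + YMARGIN)

def to_box(px, py):
    # invert the mapping; remainders past BOXSIZE land in a gap
    bx, rx = divmod(px - XMARGIN, BOXSIZE + GAPSIZE)
    by, ry = divmod(py - YMARGIN, BOXSIZE + GAPSIZE)
    if bx >= 0 and by >= 0 and rx < BOXSIZE and ry < BOXSIZE:
        return (bx, by)   # no upper bound check; the record loops over valid boxes instead
    return None           # in a gap or the margin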
{"api_name": "pygame.display", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 124, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 134, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 146, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 171, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 175, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 180, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pygame.draw.ellipse", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 201, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 206, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 206, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 229, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 232, "usage_type": "call"}]} +{"seq_id": "240184620", "text": "\"\"\"\nRepresentation of a deterministic finite automaton\n\"\"\"\n\nfrom typing import AbstractSet, Iterable\nfrom collections import deque\n\nimport numpy as np\n\nfrom .state import State\nfrom .symbol import Symbol\nfrom .transition_function import TransitionFunction\nfrom .nondeterministic_finite_automaton import NondeterministicFiniteAutomaton\nfrom .epsilon_nfa import to_single_state\nfrom .finite_automaton import to_state, to_symbol\nfrom .distinguishable_states import DistinguishableStates\nfrom .partition import Partition\nfrom .hopcroft_processing_list import HopcroftProcessingList\n\n\nclass PreviousTransitions(object):\n\n def __init__(self, states, symbols):\n self._to_index_state = dict()\n self._to_index_state[None] = 0\n for i, state in enumerate(states):\n self._to_index_state[state] = i + 1\n self._to_index_symbol = dict()\n for i, symbol in enumerate(symbols):\n self._to_index_symbol[symbol] = i\n self._conversion = np.empty((len(states) + 1, len(symbols)), 
dtype=object)\n\n def add(self, next0, symbol, state):\n i_next0 = self._to_index_state[next0]\n i_symbol = self._to_index_symbol[symbol]\n if self._conversion[i_next0, i_symbol] is None:\n self._conversion[i_next0, i_symbol] = [state]\n else:\n self._conversion[i_next0, i_symbol].append(state)\n\n def get(self, next0, symbol):\n i_next0 = self._to_index_state[next0]\n i_symbol = self._to_index_symbol[symbol]\n return self._conversion[i_next0, i_symbol] or []\n\n\nclass DeterministicFiniteAutomaton(NondeterministicFiniteAutomaton):\n \"\"\" Represents a deterministic finite automaton\n\n This class represents a deterministic finite automaton.\n\n Parameters\n ----------\n states : set of :class:`~pyformlang.finite_automaton.State`, optional\n A finite set of states\n input_symbols : set of :class:`~pyformlang.finite_automaton.Symbol`, optional\n A finite set of input symbols\n transition_function : :class:`~pyformlang.finite_automaton.TransitionFunction`, optional\n Takes as arguments a state and an input symbol and returns a state.\n start_state : :class:`~pyformlang.finite_automaton.State`, optional\n A start state, element of states\n final_states : set of :class:`~pyformlang.finite_automaton.State`, optional\n A set of final or accepting states. It is a subset of states.\n\n \"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(self,\n states: AbstractSet[State] = None,\n input_symbols: AbstractSet[Symbol] = None,\n transition_function: TransitionFunction = None,\n start_state: State = None,\n final_states: AbstractSet[State] = None):\n super().__init__(states, input_symbols, None, None, final_states)\n start_state = to_state(start_state)\n self._transition_function = transition_function or TransitionFunction()\n if start_state is not None:\n self._start_state = {start_state}\n else:\n self._start_state = {}\n if start_state is not None:\n self._states.add(start_state)\n\n def add_start_state(self, state: State) -> int:\n \"\"\" Set an initial state\n\n Parameters\n -----------\n state : :class:`~pyformlang.finite_automaton.State`\n The new initial state\n\n Returns\n ----------\n done : int\n 1 is correctly added\n \"\"\"\n state = to_state(state)\n self._start_state = {state}\n self._states.add(state)\n return 1\n\n def remove_start_state(self, state: State) -> int:\n \"\"\" remove an initial state\n\n Parameters\n -----------\n state : :class:`~pyformlang.finite_automaton.State`\n The new initial state\n\n Returns\n ----------\n done : int\n 1 is correctly added\n \"\"\"\n state = to_state(state)\n if {state} == self._start_state:\n self._start_state = {}\n return 1\n return 0\n\n def accepts(self, word: Iterable[Symbol]) -> bool:\n \"\"\" Checks whether the dfa accepts a given word\n\n Parameters\n ----------\n word : iterable of :class:`~pyformlang.finite_automaton.Symbol`\n A sequence of input symbols\n\n Returns\n ----------\n is_accepted : bool\n Whether the word is accepted or not\n \"\"\"\n word = [to_symbol(x) for x in word]\n current_state = None\n if self._start_state:\n current_state = list(self._start_state)[0]\n for symbol in word:\n if current_state is None:\n return False\n current_state = self._transition_function(current_state, symbol)\n if current_state:\n current_state = current_state[0]\n else:\n current_state = None\n return current_state is not None and self.is_final_state(current_state)\n\n def is_deterministic(self) -> bool:\n \"\"\" Checks whether an automaton is deterministic\n\n Returns\n ----------\n is_deterministic : bool\n Whether the 
automaton is deterministic\n \"\"\"\n return True\n\n def to_deterministic(self) -> \"DeterministicFiniteAutomaton\":\n \"\"\" Transforms the nfa into a dfa\n\n Returns\n ----------\n dfa : :class:`~pyformlang.deterministic_finite_automaton.DeterministicFiniteAutomaton`\n A dfa equivalent to the current nfa\n \"\"\"\n return self\n\n def copy(self) -> \"DeterministicFiniteAutomaton\":\n \"\"\" Copies the current DFA\n\n Returns\n ----------\n enfa : :class:`~pyformlang.finite_automaton.DeterministicFiniteAutomaton`\n A copy of the current DFA\n \"\"\"\n dfa = DeterministicFiniteAutomaton()\n if self._start_state:\n dfa.add_start_state(list(self._start_state)[0])\n for final in self._final_states:\n dfa.add_final_state(final)\n for state in self._states:\n for symbol in self._input_symbols:\n state_to = self._transition_function(state, symbol)\n if state_to:\n state_to = state_to[0]\n else:\n state_to = None\n if state_to is not None:\n dfa.add_transition(state, symbol, state_to)\n return dfa\n\n def _get_distinguishable_states(self):\n \"\"\" Get all the pair of states which are distinguishable\n\n Returns\n ----------\n states : set of (:class:`~pyformlang.finite_automaton.State`,\\\n :class:`~pyformlang.finite_automaton.State`)\n The pair of distinguishable\n \"\"\"\n disting = DistinguishableStates(len(self._states))\n to_process = self._initialize_distinguishable_states_to_process(disting)\n previous_transitions = self._get_previous_transitions()\n append = to_process.append\n not_contains_and_add = disting.not_contains_and_add\n get = previous_transitions.get\n symbols = self._input_symbols\n pop = to_process.pop\n while to_process:\n next0, next1 = pop()\n for symbol in symbols:\n next_states0 = get(next0, symbol)\n next_states1 = get(next1, symbol)\n for state0 in next_states0:\n for state1 in next_states1:\n state_combined = (state0, state1)\n if not_contains_and_add(state_combined):\n append(state_combined)\n return disting\n\n def _initialize_distinguishable_states_to_process(self, disting):\n to_process = deque()\n for final in self._final_states:\n for state in self._states:\n if state not in self._final_states:\n disting.add((final, state))\n to_process.append((final, state))\n disting.add((None, final))\n to_process.append((None, final))\n return to_process\n\n def _get_previous_transitions(self):\n previous_transitions = PreviousTransitions(self._states, self._input_symbols)\n for state in self._states:\n for symbol in self._input_symbols:\n next0 = self._transition_function(state, symbol)\n if next0:\n next0 = next0[0]\n else:\n next0 = None\n previous_transitions.add(next0, symbol, state)\n for symbol in self._input_symbols:\n previous_transitions.add(None, symbol, None)\n return previous_transitions\n\n def _get_reachable_states(self) -> AbstractSet[State]:\n \"\"\" Get all states which are reachable \"\"\"\n to_process = []\n processed = set()\n for state in self._start_state:\n to_process.append(state)\n processed.add(state)\n while to_process:\n current = to_process.pop()\n for symbol in self._input_symbols:\n next_state = self._transition_function(current, symbol)\n if not next_state or next_state[0] in processed:\n continue\n to_process.append(next_state[0])\n processed.add(next_state[0])\n return processed\n\n def minimize(self) -> \"DeterministicFiniteAutomaton\":\n \"\"\" Minimize the current DFA\n\n Returns\n ----------\n dfa : :class:`~pyformlang.deterministic_finite_automaton.DeterministicFiniteAutomaton`\n The minimal DFA\n \"\"\"\n if not self._start_state or 
not self._final_states:\n            return DeterministicFiniteAutomaton()\n        # Remove unreachable\n        reachables = self._get_reachable_states()\n        states = self._states.intersection(reachables)\n        # Group the equivalent states\n        partition = self._get_partition()\n        groups = partition.get_groups()\n        # Create a state for this\n        to_new_states = dict()\n        for group in groups:\n            new_state = to_single_state(group)\n            for state in group:\n                to_new_states[state] = new_state\n        # Build the DFA\n        dfa = DeterministicFiniteAutomaton()\n        for state in self._start_state:\n            dfa.add_start_state(to_new_states[state])\n        for state in states:\n            if state in self._final_states:\n                dfa.add_final_state(to_new_states[state])\n            done = set()\n            new_state = to_new_states[state]\n            for symbol in self._input_symbols:\n                for next_node in self._transition_function(state, symbol):\n                    if next_node in states:\n                        next_node = to_new_states[next_node]\n                        if (next_node, symbol) not in done:\n                            dfa.add_transition(new_state, symbol, next_node)\n                            done.add((next_node, symbol))\n        return dfa\n\n    def _get_partition(self):\n        previous_transitions = self._get_previous_transitions()\n        finals = []\n        non_finals = []\n        for state in self._states:\n            if state in self._final_states:\n                finals.append(state)\n            else:\n                non_finals.append(state)\n        # None is the trash node\n        non_finals.append(None)\n        # + 1 for trash node\n        partition = Partition(len(self._states) + 1)\n        partition.add_class(finals)\n        partition.add_class(non_finals)\n        # + 1 for trash node\n        processing_list = HopcroftProcessingList(len(self._states) + 1, self._input_symbols)\n        to_add = 0  # 0 is the index of finals, 1 of non_finals\n        if len(non_finals) < len(finals):\n            to_add = 1\n        for symbol in self._input_symbols:\n            processing_list.insert(to_add, symbol)\n        while not processing_list.is_empty():\n            current_class, current_symbol = processing_list.pop()\n            inverse = []\n            for element in partition.part[current_class]:\n                inverse += previous_transitions.get(element.value, current_symbol)\n            for valid_set in partition.get_valid_sets(inverse):\n                new_class = partition.split(valid_set, inverse)\n                for symbol in self._input_symbols:\n                    if processing_list.contains(valid_set, symbol):\n                        processing_list.insert(new_class, symbol)\n                    elif len(partition.part[valid_set]) < len(partition.part[new_class]):\n                        processing_list.insert(valid_set, symbol)\n                    else:\n                        processing_list.insert(new_class, symbol)\n        return partition\n\n    def is_equivalent_to(self, other):\n        \"\"\" Check whether two automata are equivalent\n\n        Parameters\n        ----------\n        other : :class:`~pyformlang.deterministic_finite_automaton.FiniteAutomaton`\n            The automaton to compare against\n\n        Returns\n        ----------\n        are_equivalent : bool\n            Whether the two automata are equivalent or not\n        \"\"\"\n        if not isinstance(other, DeterministicFiniteAutomaton):\n            other_dfa = other.to_deterministic()\n            return self.is_equivalent_to(other_dfa)\n        self_minimal = self.minimize()\n        other_minimal = other.minimize()\n        return self._is_equivalent_to_minimal(self_minimal, other_minimal)\n\n    @property\n    def start_state(self) -> State:\n        return list(self._start_state)[0]\n\n    @staticmethod\n    def _is_equivalent_to_minimal(self_minimal, other_minimal):\n        to_process = [(self_minimal.start_state,\n                       other_minimal.start_state)]\n        matches = {self_minimal.start_state: other_minimal.start_state}\n        while to_process:\n            current_self, current_other = to_process.pop()\n            if (self_minimal.is_final_state(current_self) and not other_minimal.is_final_state(current_other)) or\\\n                    (not self_minimal.is_final_state(current_self) and 
other_minimal.is_final_state(current_other)):\n return False\n next_self = self_minimal(current_self)\n next_other = other_minimal(current_other)\n if len(next_self) != len(next_other):\n return False\n if len(next_self) == 0:\n continue\n next_symbol_self, next_state_self = list(next_self)[0]\n next_symbol_other, next_state_other = list(next_other)[0]\n if next_symbol_other != next_symbol_self:\n return False\n if next_state_self in matches:\n if matches[next_state_self] != next_state_other:\n return False\n else:\n matches[next_state_self] = next_state_other\n to_process.append((next_state_self, next_state_other))\n return True\n\n\ndef get_groups(states, distinguishable) -> Iterable[AbstractSet[State]]:\n \"\"\" Get the groups in the minimization \"\"\"\n groups = []\n were_grouped = set()\n states = list(states)\n for state0, state1 in distinguishable.get_non_distinguishable():\n were_grouped.add(state0)\n were_grouped.add(state1)\n new_groups = [{state0, state1}]\n for group in groups:\n if state0 in group or state1 in group:\n new_groups[0] = new_groups[0].union(group)\n else:\n new_groups.append(group)\n groups = new_groups\n return (groups, were_grouped)\n", "sub_path": "pyformlang/finite_automaton/deterministic_finite_automaton.py", "file_name": "deterministic_finite_automaton.py", "file_ext": "py", "file_size_in_byte": 15600, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.empty", "line_number": 31, "usage_type": "call"}, {"api_name": "nondeterministic_finite_automaton.NondeterministicFiniteAutomaton", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.AbstractSet", "line_number": 69, "usage_type": "name"}, {"api_name": "state.State", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.AbstractSet", "line_number": 70, "usage_type": "name"}, {"api_name": "symbol.Symbol", "line_number": 70, "usage_type": "name"}, {"api_name": "transition_function.TransitionFunction", "line_number": 71, "usage_type": "name"}, {"api_name": "state.State", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.AbstractSet", "line_number": 73, "usage_type": "name"}, {"api_name": "state.State", "line_number": 73, "usage_type": "name"}, {"api_name": "finite_automaton.to_state", "line_number": 75, "usage_type": "call"}, {"api_name": "transition_function.TransitionFunction", "line_number": 76, "usage_type": "call"}, {"api_name": "state.State", "line_number": 84, "usage_type": "name"}, {"api_name": "finite_automaton.to_state", "line_number": 97, "usage_type": "call"}, {"api_name": "state.State", "line_number": 102, "usage_type": "name"}, {"api_name": "finite_automaton.to_state", "line_number": 115, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 121, "usage_type": "name"}, {"api_name": "symbol.Symbol", "line_number": 121, "usage_type": "name"}, {"api_name": "finite_automaton.to_symbol", "line_number": 134, "usage_type": "call"}, {"api_name": "distinguishable_states.DistinguishableStates", "line_number": 201, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 222, "usage_type": "call"}, {"api_name": "typing.AbstractSet", "line_number": 246, "usage_type": "name"}, {"api_name": "state.State", "line_number": 246, "usage_type": "name"}, {"api_name": "partition.get_groups", "line_number": 278, "usage_type": "call"}, {"api_name": "epsilon_nfa.to_single_state", "line_number": 282, "usage_type": "call"}, {"api_name": "partition.Partition", "line_number": 315, 
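A usage sketch for the class above, exercising only methods the record itself defines (add_start_state, add_final_state, add_transition, accepts, minimize, is_equivalent_to) and assuming the package re-exports State and Symbol as its docstrings suggest (the record's API annotations resume below):

from pyformlang.finite_automaton import (DeterministicFiniteAutomaton,
                                         State, Symbol)

q0, q1, q2 = State("q0"), State("q1"), State("q2")
a = Symbol("a")
dfa = DeterministicFiniteAutomaton()
dfa.add_start_state(q0)
dfa.add_final_state(q1)
dfa.add_final_state(q2)
dfa.add_transition(q0, a, q1)
dfa.add_transition(q1, a, q2)
dfa.add_transition(q2, a, q1)
assert dfa.accepts([a, a, a])
minimal = dfa.minimize()              # q1 and q2 are indistinguishable and collapse
assert minimal.is_equivalent_to(dfa)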
"usage_type": "call"}, {"api_name": "partition.add_class", "line_number": 316, "usage_type": "call"}, {"api_name": "partition.add_class", "line_number": 317, "usage_type": "call"}, {"api_name": "hopcroft_processing_list.HopcroftProcessingList", "line_number": 319, "usage_type": "call"}, {"api_name": "partition.part", "line_number": 328, "usage_type": "attribute"}, {"api_name": "partition.get_valid_sets", "line_number": 330, "usage_type": "call"}, {"api_name": "partition.split", "line_number": 331, "usage_type": "call"}, {"api_name": "partition.part", "line_number": 335, "usage_type": "attribute"}, {"api_name": "state.State", "line_number": 362, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 394, "usage_type": "name"}, {"api_name": "typing.AbstractSet", "line_number": 394, "usage_type": "name"}, {"api_name": "state.State", "line_number": 394, "usage_type": "name"}]} +{"seq_id": "86848826", "text": "import logging\nimport os\nimport select\nimport SimpleHTTPServer\nimport socket\nimport SocketServer\nimport threading\n\nHERE = os.path.dirname(__file__)\nlogger = logging.getLogger(__name__)\n\n\nclass ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n def translate_path(self, path):\n path = path.split('?', 1)[0].split('#', 1)[0]\n return os.path.join(HERE, *filter(None, path.split('/')))\n\n def log_message(self, s, *args):\n # output via logging so nose can catch it\n logger.info(s, *args)\n\n\nclass ShutdownServer(SocketServer.TCPServer):\n \"\"\"Mixin that allows serve_forever to be shut down.\n\n The methods in this mixin are backported from SocketServer.py in the Python\n 2.6.4 standard library. The mixin is unnecessary in 2.6 and later, when\n BaseServer supports the shutdown method directly.\n \"\"\"\n\n def __init__(self, use_tls, *args, **kwargs):\n self.__use_tls = use_tls\n SocketServer.TCPServer.__init__(self, *args, **kwargs)\n self.__is_shut_down = threading.Event()\n self.__serving = False\n\n def server_bind(self):\n SocketServer.TCPServer.server_bind(self)\n if self.__use_tls:\n import ssl\n self.socket = ssl.wrap_socket(self.socket,\n os.path.join(os.path.dirname(__file__), 'server.key'),\n os.path.join(os.path.dirname(__file__), 'server.pem'),\n True\n )\n\n\n def serve_forever(self, poll_interval=0.1):\n \"\"\"Handle one request at a time until shutdown.\n\n Polls for shutdown every poll_interval seconds. Ignores\n self.timeout. If you need to do periodic tasks, do them in\n another thread.\n \"\"\"\n self.__serving = True\n self.__is_shut_down.clear()\n while self.__serving:\n r, w, e = select.select([self.socket], [], [], poll_interval)\n if r:\n self._handle_request_noblock()\n self.__is_shut_down.set()\n\n def shutdown(self):\n \"\"\"Stops the serve_forever loop.\n\n Blocks until the loop has finished. 
This must be called while\n serve_forever() is running in another thread, or it will deadlock.\n \"\"\"\n self.__serving = False\n self.__is_shut_down.wait()\n\n def handle_request(self):\n \"\"\"Handle one request, possibly blocking.\n\n Respects self.timeout.\n \"\"\"\n # Support people who used socket.settimeout() to escape\n # handle_request before self.timeout was available.\n timeout = self.socket.gettimeout()\n if timeout is None:\n timeout = self.timeout\n elif self.timeout is not None:\n timeout = min(timeout, self.timeout)\n fd_sets = select.select([self], [], [], timeout)\n if not fd_sets[0]:\n self.handle_timeout()\n return\n self._handle_request_noblock()\n\n def _handle_request_noblock(self):\n \"\"\"Handle one request, without blocking.\n\n I assume that select.select has returned that the socket is\n readable before this function was called, so there should be\n no risk of blocking in get_request().\n \"\"\"\n try:\n request, client_address = self.get_request()\n except socket.error:\n return\n if self.verify_request(request, client_address):\n try:\n self.process_request(request, client_address)\n except:\n self.handle_error(request, client_address)\n self.close_request(request)\n\n\ndef start_server(handler, use_tls=False):\n httpd = ShutdownServer(use_tls, (\"\", 0), handler)\n threading.Thread(target=httpd.serve_forever).start()\n _, port = httpd.socket.getsockname()\n return httpd, port\n", "sub_path": "third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py", "file_name": "miniserver.py", "file_ext": "py", "file_size_in_byte": 3779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "SimpleHTTPServer.SimpleHTTPRequestHandler", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "SocketServer.TCPServer", "line_number": 23, "usage_type": "attribute"}, {"api_name": "SocketServer.TCPServer.__init__", "line_number": 33, "usage_type": "call"}, {"api_name": "SocketServer.TCPServer", "line_number": 33, "usage_type": "attribute"}, {"api_name": "threading.Event", "line_number": 34, "usage_type": "call"}, {"api_name": "SocketServer.TCPServer.server_bind", "line_number": 38, "usage_type": "call"}, {"api_name": "SocketServer.TCPServer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ssl.wrap_socket", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 43, "usage_type": "call"}, {"api_name": "select.select", "line_number": 58, "usage_type": "call"}, {"api_name": "select.select", "line_number": 84, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 99, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "399991057", "text": "import os\n\nimport 
requests\nfrom flask import Flask, jsonify\nfrom flask.ext.cache import Cache\n\n# Construct the app and load settings\napp = Flask(__name__)\napp.config.from_pyfile('settings.py')\n\n# Construct the cache object\ncache = Cache(app)\n\n@app.route(\"/posts/<username>\")\ndef posts(username):\n \"\"\"Endpoint for fetching the Facebook posts of a given user\"\"\"\n\n posts = get_posts(username=username, retry=True) \n return jsonify(posts)\n\ndef get_posts(username, retry):\n \"\"\"Retrieve the Facebook posts of a given user\"\"\"\n\n # The collection of posts to be returned to the client\n posts = {}\n\n # Execute the HTTP request\n r = requests.get(app.config['FB_POSTS_URL'] % (username, get_access_token()))\n\n # Sanity check the response status\n if r.status_code == requests.codes.ok:\n try:\n posts = r.json()\n except:\n pass\n elif retry and 400 <= r.status_code < 500:\n # We have a 4xx error which most likely indicates an invalid token.\n # Clear the cache so we'll get a new token when retrying.\n cache.clear()\n\n # Retry fetching the posts\n posts = get_posts(username, retry=False)\n \n return posts\n\ndef get_access_token():\n \"\"\"Retrieve an access token for the mobile app\"\"\"\n\n # Pull the access token out of the cache\n access_token = cache.get(app.config['CACHE_KEY'])\n \n # Check if an access token was returned from the cache\n if not access_token or len(access_token) == 0:\n # No access token, so fetch one\n r = requests.get(app.config['FB_TOKEN_URL'])\n\n # Sanity check the response status\n if r.status_code == requests.codes.ok:\n try:\n # Extract the access token\n access_token = r.text.split('=')[1]\n\n # Cache the access token if it is valid\n if access_token and len(access_token) > 0:\n cache.set(app.config['CACHE_KEY'], access_token)\n except:\n pass\n\n return access_token\n\ndef clear_cache():\n \"\"\"Clears the cache\"\"\"\n with app.app_context():\n cache.clear()\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8181, debug=True)\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2219, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.ext.cache.Cache", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 31, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "98657841", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 18 11:07:13 2019\n\n@author: mrdra\n\"\"\"\n\nfrom pykinect2 import PyKinectV2\nfrom pykinect2.PyKinectV2 import *\nfrom pykinect2 import PyKinectRuntime\n\nimport ctypes\nimport cv2\nimport numpy as np\nimport _ctypes\nimport sys\nimport time\nimport threading\nimport queue\n\n\nclass Closest_Body_Frame(object):\n def __init__(self, body_frame, engage_min, engage_max):\n self.body_frame = body_frame\n self.engage_min = engage_min\n self.engage_max = engage_max\n self.engaged = False\n self.bodies_tracked = 0\n self.closest_body = None\n \n tracked_bodies = {}\n for body in self.body_frame.bodies:\n if body.is_tracked:\n tracked_bodies[self.distance_from_kinect(body)] = body\n self.bodies_tracked += 1\n \n if 
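The Flask record above caches one Facebook access token and, on a 4xx response, clears the cache and retries once so a fresh token is fetched. That control flow, distilled into a framework-agnostic sketch; fetch_token and fetch_posts are hypothetical callables standing in for the FB_TOKEN_URL and FB_POSTS_URL requests (the Kinect record resumes below):

def get_posts_cached(username, cache, fetch_token, fetch_posts):
    token = cache.get("token")
    if not token:
        token = fetch_token()
        cache.set("token", token)
    resp = fetch_posts(username, token)
    if 400 <= resp.status_code < 500:
        cache.clear()                           # token presumed expired or invalid
        token = fetch_token()
        cache.set("token", token)
        resp = fetch_posts(username, token)     # single retry with a fresh token
    return resp.json() if resp.status_code == 200 else {}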
self.bodies_tracked > 0:\n            self.closest_body = tracked_bodies[min(tracked_bodies.keys())] \n            self.engaged = self.is_engaged(self.closest_body)\n        \n    def distance_from_kinect(self, body):\n        pos = body.joints[JointType_SpineBase].Position\n        return pos.x**2 + pos.y**2 + pos.z**2\n    \n    def is_engaged(self, body):\n        if body is None:\n            return False\n        dist = self.distance_from_kinect(body)\n        return self.engage_min < dist and self.engage_max > dist\n\n    def check_for_bodies(self):\n        if self.bodies_tracked > 0:\n            return True\n        return False\n\nclass Preprocessed_Frame(object):\n    def __init__(self, frame, left_mask, right_mask, timestamp, engagement):\n        self.frame = frame\n        self.left_mask = left_mask\n        self.right_mask = right_mask\n        self.timestamp = timestamp\n        self.engagement = engagement\n\n    def masks_valid(self):\n        return self.left_mask_valid() and self.right_mask_valid()  # call the methods; the unparenthesised names were always truthy\n\n    def left_mask_valid(self):\n        if self.left_mask is not None:  # was the bare name left_mask, which raised NameError\n            return True\n        return False\n\n    def right_mask_valid(self):\n        if self.right_mask is not None:  # was the bare name right_mask, which raised NameError\n            return True\n        return False\n\nclass CANet_Preprocessor(threading.Thread):\n    def __init__(self, engage_min, engage_max):\n        threading.Thread.__init__(self)\n        self.kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Body)\n        self.engage_min = engage_min\n        self.engage_max = engage_max\n        self.cbf = None\n        self.df = None\n        self.fx = 288.03\n        self.fy = 287.07\n        self.cube_size = 396\n        self.fallback_size = 200\n        self.frames = []\n        self.queue = queue.Queue()\n        self.event = threading.Event()\n        self.frame = None\n\n    def get_hand_positions(self):\n        joint_points = self.kinect.body_joints_to_depth_space(self.cbf.closest_body.joints)\n        return joint_points[JointType_HandRight], joint_points[JointType_HandLeft]\n\n    def new_depth_frame_arrived(self):\n        return self.kinect.has_new_depth_frame()\n\n    def new_body_frame_arrived(self):\n        return self.kinect.has_new_body_frame()\n\n    def get_frame(self):\n        self.event.clear()\n        return self.frame\n\n    def event_set(self):\n        return self.event.is_set()\n\n    def body_engaged(self):\n        if self.cbf:\n            return self.cbf.engaged\n        return False\n    \n    def segment(self, hand_pos):\n        x_start = 0\n        y_start = 0\n        x_end = self.fallback_size\n        y_end = self.fallback_size\n        mask = np.zeros(self.df.shape)\n        depth_valid = True\n        \n        x = int(hand_pos.x)\n        y = int(hand_pos.y)\n        \n        if x < 0 or x >= mask.shape[1] or y < 0 or y >= mask.shape[0]:\n            return mask\n        z = self.df[y, x]\n        \n        if z == 0:\n            depth_valid = False\n        \n        if depth_valid:\n            x_start = int(x - (self.cube_size*self.fx)/(2*z))\n            x_end = int(x + (self.cube_size*self.fx)/(2*z))\n\n            y_start = int(y - (self.cube_size*self.fy)/(2*z))\n            y_end = int(y + (self.cube_size*self.fy)/(2*z))\n        \n        mask[max(y_start, 0):min(y_end, mask.shape[0]-1), max(x_start, 0):min(x_end, mask.shape[1]-1)] = 255\n        \n        return mask\n\n    def run(self):\n        while True:\n            if self.kinect.has_new_body_frame():\n                self.cbf = Closest_Body_Frame(self.kinect.get_last_body_frame(), self.engage_min, self.engage_max)\n\n            if self.cbf and self.cbf.check_for_bodies():\n                if self.kinect.has_new_depth_frame():\n                    self.df = self.kinect.get_last_depth_frame()\n                    timestamp = time.time()\n                    self.df = np.resize(self.df, (424, 512))\n                    right_pos, left_pos = self.get_hand_positions()\n                    left_mask = self.segment(left_pos)\n                    right_mask = self.segment(right_pos)\n\n                    self.frame = Preprocessed_Frame(self.df, left_mask, right_mask, timestamp, self.cbf.engaged)\n                    self.event.set()\n\n\n\nclass_instance = CANet_Preprocessor(0.0, 10.0)\nclass_instance.start()\ntotal_time = 
0\nframe_count = 0\nstart = time.time()\nwhile True:\n if class_instance.event.is_set():\n start = time.time()\n frame_count+=1\n class_instance.event.clear()\n frame = class_instance.frame\n print(1/(time.time() - start+.00001))\n cv2.imshow(\"frame\", frame.frame)\n cv2.imshow(\"left mask\", np.multiply(frame.left_mask, frame.frame))\n cv2.imshow(\"right mask\", np.multiply(frame.right_mask, frame.frame))\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n", "sub_path": "BlocksWorld/Assets/_Core/Scripts/Perception/CANet/CANet_preprocessing.py", "file_name": "CANet_preprocessing.py", "file_ext": "py", "file_size_in_byte": 5634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "threading.Thread", "line_number": 77, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 79, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pykinect2.PyKinectRuntime.PyKinectRuntime", "line_number": 80, "usage_type": "call"}, {"api_name": "pykinect2.PyKinectRuntime", "line_number": 80, "usage_type": "name"}, {"api_name": "pykinect2.PyKinectV2.FrameSourceTypes_Depth", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pykinect2.PyKinectV2", "line_number": 80, "usage_type": "name"}, {"api_name": "pykinect2.PyKinectV2.FrameSourceTypes_Body", "line_number": 80, "usage_type": "attribute"}, {"api_name": "queue.Queue", "line_number": 90, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "time.time", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.resize", "line_number": 154, "usage_type": "call"}, {"api_name": "time.time", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", "line_number": 171, "usage_type": "call"}, {"api_name": "time.time", "line_number": 175, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 176, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 177, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "278855297", "text": "# -*- coding: utf-8 -*-\nfrom collections import Counter,OrderedDict\n\nimport xlrd\nfrom openpyxl import load_workbook\n\n\nclass Data:\n\n def __init__(self, date, path):\n self.gather_file = load_workbook(filename=u'FE+FC+FD.xlsx', guess_types=True)\n self.all_file = xlrd.open_workbook(path+date+'all.xls')\n self.suc_file = xlrd.open_workbook(path+date + '00.xls')\n self.fe_file = xlrd.open_workbook(path+date + 'fe.xls')\n self.fc_file = xlrd.open_workbook(path+date + 'fc.xls')\n self.fd_file = xlrd.open_workbook(path+date + 'fd.xls')\n self.date = date\n\n self.err_dicts = {}\n\n def update_gather(self):\n # gather_rows = self.count_all(self.gather_file) + 3\n all_count = self.count_all(self.all_file)\n suc_count = self.count_all(self.suc_file)\n fe_count = self.count_all(self.fe_file)\n fc_count = self.count_all(self.fc_file)\n fd_count = self.count_all(self.fd_file)\n err_count = fe_count+fc_count+fd_count\n\n err_rate = float(err_count)/float(all_count)\n fe_rate = float(fe_count)/float(err_count)\n fc_rate = float(fc_count) / 
float(err_count)\n        fd_rate = float(fd_count) / float(err_count)\n\n        fe_concentration = self.calculate_concentration(self.fe_file, 'fe')\n        fc_concentration = self.calculate_concentration(self.fc_file, 'fc')\n        fd_concentration = self.calculate_concentration(self.fd_file, 'fd')\n\n        gather_sheet = self.gather_file.worksheets[4]# sheet index TBD\n        gather_rows = str(gather_sheet.max_row + 1)\n\n        date = self.modify_date()\n        gather_sheet['A' + gather_rows] = date\n        gather_sheet['B' + gather_rows] = fe_count\n        gather_sheet['C' + gather_rows] = fc_count\n        gather_sheet['D' + gather_rows] = fd_count\n        gather_sheet['E' + gather_rows] = err_count\n        gather_sheet['F' + gather_rows] = suc_count\n        gather_sheet['G' + gather_rows] = all_count\n\n        gather_sheet['H' + gather_rows] = err_rate\n        gather_sheet['I' + gather_rows] = fe_concentration\n        gather_sheet['J' + gather_rows] = fc_concentration\n        gather_sheet['K' + gather_rows] = fd_concentration\n        gather_sheet['L' + gather_rows] = 0.25\n        gather_sheet['M' + gather_rows] = fe_rate\n        gather_sheet['N' + gather_rows] = fc_rate\n        gather_sheet['O' + gather_rows] = fd_rate\n        for c in range(ord('H'), ord('O')+1):\n            gather_sheet[chr(c) + gather_rows].number_format = \"##.#%\"\n\n        self.update_err_sheet('fe', 0)\n        self.update_err_sheet('fc', 1)\n        self.update_err_sheet('fd', 2)\n        self.gather_file.save(u'FE+FC+FD.xlsx')\n\n    def modify_date(self):\n        arr = self.date.split(r'-')\n        m = int(arr[1])\n        d = arr[2]\n        date = str(m) + u'月' + d + u'日'\n        return date\n\n    def update_err_sheet(self, type, sheet_index):\n        err_sheet = self.gather_file.worksheets[sheet_index]\n        ncols = err_sheet.max_column+1\n        self.write_err_sheet(type, err_sheet, ncols)\n\n    def write_err_sheet(self, type, err_sheet, ncols):\n        err_sheet.cell(row=1, column=ncols).value = self.date+u'车辆编号'\n        err_sheet.cell(row=1, column=ncols+1).value = u'计数'\n\n        n = 2\n        for k, v in self.err_dicts[type].iteritems():\n            err_sheet.cell(row=n, column=ncols).value = k\n            err_sheet.cell(row=n, column=ncols+1).value = v\n            n += 1\n\n    def count_all(self, workbook):\n        n_rows = 0\n        i = 0\n        for sheet in workbook.sheets():\n            table = sheet\n            n_rows += table.nrows\n            i += 1\n        return n_rows - 2 * i - 1\n\n    def calculate_concentration(self, workbook, type):\n        error_count = self.count_all(workbook)\n\n        bus_num_counter = Counter()\n        n_sheets = len(workbook.sheets()) # number of sheets\n        for sheet in workbook.sheets():\n            table = sheet\n            n_rows = table.nrows\n            if n_sheets == 1:\n                n_rows -= 1 # skip the last row of the last sheet\n            for i in range(2, n_rows):\n                bus_num = table.cell(i, 5).value.encode(\"utf-8\")\n                bus_num_counter[bus_num] += 1\n            n_sheets -= 1\n\n        bus_num_dict = OrderedDict(bus_num_counter.most_common())\n\n        # keep the ordered per-bus counts for this error type\n        self.err_dicts[type] = bus_num_dict\n\n        bus_num_total_count = len(bus_num_dict) # number of distinct buses seen\n\n        # top 25% of buses\n        top_quarter = bus_num_total_count/4 if bus_num_total_count%4 == 0 else bus_num_total_count/4 +1\n        top_quarter_bus = OrderedDict(bus_num_counter.most_common(top_quarter))\n        # error count contributed by the top 25% of buses\n        quarter_bus_errors = 0\n        for k, v in top_quarter_bus.iteritems():\n            freq = int(v)\n            quarter_bus_errors += freq\n\n        # fix for the division-by-zero bug found on 2017-08-05\n        if error_count == 0:\n            return 0.0000\n        return float(quarter_bus_errors)/float(error_count)\n\nif __name__ == '__main__':\n    import datetime\n    file_path = r'C:\\DownLoads\\HZBUS\\\\'\n    date = datetime.date(2017, 9, 23).strftime(\"%Y-%m-%d\")\n    data = Data(date, file_path)\n    data.update_gather()", "sub_path": "HZBus/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 5194, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 11, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 12, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 13, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 14, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 15, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 101, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 113, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "166543099", "text": "# -*- coding:utf-8 -*-\n\"\"\"\nauthor: byangg\ndatettime: 2020/5/15 9:49\n\"\"\"\n\n# -*- coding:utf-8 -*-\n\"\"\"\nauthor: byangg\ndatettime: 2020/3/20 14:00\n\"\"\"\nimport json\nimport string\nimport datetime\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom _plotly_utils.utils import PlotlyJSONEncoder\nfrom dateutil.relativedelta import relativedelta\nfrom django.urls import reverse\n\nfrom .utils import engine, get_cut_val\n\nimport os\n\nos.environ['DJANGO_SETTINGS_MODULE'] = \"AppraisalSystem.settings\"\n\nTABLE_COLS = ['lastname', 'score_ori', 'score', 'score_class',\n 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10', 'v11',\n 'point_ori', 'point', 'point_class']\nTABLE_COLS_EMP = ['lastname', 'score_ori', 'score', 'score_class',\n 'v1', 'v2', 'v3', 'v4', 'v5',\n 'point_ori', 'point', 'point_class']\nSHOW_COLS = {'lastname': '姓名', 'score_ori': '测评分', 'score': '测评排名', 'score_class': '测评等级',\n 'v1': '客户意识(15)', 'v2': '成本意识(10)', 'v3': '责任心(10)', 'v4': '日清日毕(10)', 'v5': '坚持力(10)',\n 'v6': '领导力(10)', 'v7': '学习创新(10)', 'v8': '团队协作(10)', 'v9': '公平公正(5)', 'v10': '廉洁诚信(5)', 'v11': '微笑服务(5)',\n 'point_ori': '积分', 'point': '积分排名', 'point_class': '积分等级',\n 'txrName': '测评人', 'total': '总分'\n }\n\nSHOW_COLS_EMP = {'lastname': '姓名', 'score_ori': '测评分', 'score': '测评排名', 'score_class': '测评等级',\n 'v1': '责任心(满分30)', 'v2': '执行力(满分20)', 'v3': '团队协作(满分20)', 'v4': '工作不推诿(满分20)', 'v5': '日清日毕(满分10)',\n 'point_ori': '积分', 'point': '积分排名', 'point_class': '积分等级',\n 'txrName': '测评人', 'total': '总分'\n }\n\nPERM_COLS = ['txrName', 'total', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10', 'v11']\nPERM_COLS_EMP = ['txrName', 'total', 'v1', 'v2', 'v3', 'v4', 'v5']\n\n\ndef get_areas():\n df_area = pd.DataFrame([[1, 1, 2, 2, 1], [1, 2, 2, 1, 1]]).T * 10\n\n df_area1 = df_area.copy()\n\n df_area[0] += 40\n df_area[1] += 40\n\n df_area1[0] += 70\n df_area1[1] += 70\n\n return df_area, df_area1\n\n\ndef stuff_plot(username):\n # username = 'name1'\n # sql = f\"select * from test_data where username='{username}'\"\n #\n # df = pd.read_sql(sql, engine)\n # areas = get_areas()\n\n nums = 20\n df = pd.DataFrame(np.random.randn(nums, 2) * 10 + 70, dtype=np.int,\n columns=['x', 'y'], index=list(range(nums)))\n\n df['name'] = [f'员工_{string.ascii_uppercase[i]}' for i in range(nums)]\n df = df.groupby(['x', 'y'])['name'].agg(\n lambda x: ','.join(x)).reset_index()\n\n df_area = pd.DataFrame([[1, 1, 2, 2, 1], [1, 2, 2, 1, 1]]).T * 10\n\n df_area1 = df_area.copy()\n\n df_area[0] += 40\n df_area[1] += 40\n\n df_area1[0] += 70\n df_area1[1] += 70\n\n fig = go.Figure()\n\n fig.add_trace(\n 
go.Scatter(x=df_area[0], y=df_area[1], line_color='Coral',\n                   mode='lines', fill='toself', fillcolor='LightSalmon',\n                   name='Warn'))\n    fig.add_trace(\n        go.Scatter(x=df_area1[0], y=df_area1[1], line_color='lightskyblue',\n                   mode='lines', fill='toself', name='Pef'))\n    fig.add_trace(go.Scatter(x=df['x'], y=df['y'], text=df['name'],\n                             mode='markers + text', textposition='top center'))\n\n    fig.update_layout(showlegend=False, height=800, )\n\n    # fig.show()\n\n    graphJSON = json.dumps(fig, cls=PlotlyJSONEncoder)\n\n    return graphJSON\n\n    # plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n    # return plot_div\n\n\ndef get_area(df):\n    # xs = np.quantile(df.score.astype(float), [0, 0.25, 0.5, 0.75, 1])\n    # ys = np.quantile(df.point.astype(float), [0, 0.25, 0.5, 0.75, 1])\n    xs = get_cut_val(df)\n    if not xs: return None\n\n    xs = [max(xs) - v for v in xs]\n    ys = xs\n\n    # xs[0] -= 5\n    # xs[-1] += 5\n    # ys[0] -= 5\n    # ys[-1] += 5\n\n    area1_x = [xs[0], xs[1], xs[1], xs[0], xs[0]]\n    area1_y = [ys[0], ys[0], ys[1], ys[1], ys[0]]\n\n    area2_x = [xs[4], xs[2], xs[2], xs[1], xs[1], xs[0], xs[0], xs[1], xs[1], xs[4], xs[4]]\n    area2_y = [ys[1], ys[1], ys[2], ys[2], ys[4], ys[4], ys[1], ys[1], ys[0], ys[0], xs[1]]\n\n    area3_x = [xs[4], xs[3], xs[3], xs[1], xs[1], xs[2], xs[2], xs[4], xs[4]]\n    area3_y = [ys[3], ys[3], ys[4], ys[4], ys[2], ys[2], ys[1], ys[1], ys[3]]\n\n    area4_x = [xs[3], xs[4], xs[4], xs[3], xs[3]]\n    area4_y = [ys[3], ys[3], ys[4], ys[4], ys[3]]\n\n    return area1_x, area1_y, \\\n           area2_x, area2_y, \\\n           area3_x, area3_y, \\\n           area4_x, area4_y\n\n\n# def get_data(month, category):\n#     year = int(month[:4])\n#     month = int(month[5:])\n#     sql = f'''\n#     select loginid,\n#            lastname,\n#            total score, point,\n#            v1 ,v2 ,v3 ,v4 ,v5 ,v6 ,v7 ,v8 ,v9 ,v10,v11,\n#            departmentid,\n#            row_number() over (order by total desc) num\n#     from cp_result where years={year} and months={month}\n#     and category={category}\n#     '''\n#     df = pd.read_sql(sql, engine)\n#     area1_x, area1_y, area2_x, area2_y = get_area(df)\n#     return df, area1_x, area1_y, area2_x, area2_y\n\ndef get_data(date, category, depart=None):\n    year = int(date[:4])\n    month = int(date[5:])\n\n    # sql = f'''\n    # select\n    #    lastname,\n    #    total score_ori,\n    #    point point_ori,\n    #    cp_result.*,\n    #    loginid,\n    #    HrmDepartment.departmentname, HrmResource.departmentid,\n    #    rank() over (partition by years,months order by total desc) score,\n    #    rank() over (partition by years,months order by total) score1,\n    #    rank() over (partition by years,months order by point desc) point,\n    #    rank() over (partition by years,months order by point) point1\n    # from cp_result\n    # INNER JOIN HrmResource ON HrmResource.id = btprid\n    # INNER JOIN a_CpYgDepBind ON a_CpYgDepBind.childId = HrmResource.departmentid\n    # INNER JOIN HrmDepartment ON HrmDepartment.id = a_CpYgDepBind.supdepId\n    # INNER JOIN tmp_point on loginid = tmp_point.userAccount\n    # WHERE years = {year} AND months = {month} and category = {category}\n    # AND HrmResource.status in (0,1,2,3)\n    # '''\n\n    sql = f'''\n    select * from result_all\n    where years = {year} AND months = {month} and category = {category}\n    '''\n\n    if category == 6:\n        pass\n    elif category == 7:\n        assert depart is not None\n        sql += f\" and supdepId = {depart}\"\n    else:\n        raise ValueError(f\"Can't find the category of {category}\")\n\n    sql += \" order by score_ori desc\"\n\n    print(sql)\n    # df_ap = pd.read_sql(sql, engine)\n    # df_point = get_point(date) # todo: cache each month's data in a dict\n    # df = pd.merge(df_ap, df_point, left_on='loginid', right_on='userAccount')\n    df = 
pd.read_sql(sql, engine)\n    df['hover_txt'] = '测评:' + df.score.astype(str) + f'/{len(df)}' + \",积分:\" + df.point.astype(str) + f'/{len(df)}'\n    # area1_x, area1_y, area2_x, area2_y = get_area(df)\n    areas = get_area(df)\n    return df, areas\n\n\ndef get_base_chart(month, group, depart=None):\n    df, areas = get_data(month, group, depart)\n    df['url'] = \"<a target='_parent' href='/appraisal/dtl?name=\" + df.loginid + f\"&month={month}\" \\\n                + f\"&group={group}\" + f\"&depart={depart}\" + \"'>\" + df.lastname + \"</a>\"\n    # df['url'] = \"<a href='\" + reverse(\"dtl\") + \"?name=\" + df.loginid + f\"&month={month}\" \\\n    #             + f\"&group={group}\" + f\"&depart={depart}\" + \"'>\" + df.lastname + \"</a>\"\n    point = go.Scatter(x=df.score1, y=df.point1, mode='markers + text', marker=dict(color='blue'),\n                       text=df.url, textposition='top center',\n                       hovertext=df['hover_txt'],\n                       hoverinfo='text'\n                       )\n    if areas:\n        # https://www.color-hex.com/color/7fffd4\n        str_colors = \"#349bff #66d8ff #7fdfff #99e5ff\"\n        colors = str_colors.split(\" \")\n        area1_x, area1_y, area2_x, area2_y, area3_x, area3_y, area4_x, area4_y = areas\n        area1 = go.Scatter(x=area1_x, y=area1_y, line_color=colors[3], fillcolor=colors[3],\n                           mode='lines', fill='toself', name='Perfect')\n        area2 = go.Scatter(x=area2_x, y=area2_y, line_color=colors[2], fillcolor=colors[2],\n                           mode='lines', fill='toself', name='Medium')\n        area3 = go.Scatter(x=area3_x, y=area3_y, line_color=colors[1], fillcolor=colors[1],\n                           mode='lines', fill='toself', name='Normal')\n        area4 = go.Scatter(x=area4_x, y=area4_y, line_color=colors[0], fillcolor=colors[0],\n                           mode='lines', fill='toself', name='Pool')\n\n        fig = go.Figure([area1, area2, area3, area4, point])\n    else:\n        fig = go.Figure([point])\n    fig.update_layout(xaxis={\n        'title': '测评'},\n        yaxis={'title': '积分'},\n        autosize=False,\n        width=980,\n        height=800,\n        showlegend=False)\n    return df, fig\n\n\ndef get_loginid_id():\n    df = pd.read_sql('''\n    select loginid, id from HrmResource\n    ''', engine)\n\n    loginid_id = {l: i for l, i in df.values}\n    return loginid_id\n\n\ndef get_html(df):\n    # html = df.to_html(index=False, classes='table-striped', border=0).replace(\n    #     'dataframe', 'table').replace('<tr>', '<tr style=\" white-space:nowrap\" class=\"text-center\">'). \\\n    #     replace('<tr style=\"text-align: right;\">', '<tr style=\" white-space:nowrap\" class=\"text-center\">')\n\n    html = df.to_html(index=False, classes='table-striped', border=0).replace(\n        'dataframe', 'table').replace('<tr>', '<tr style=\" white-space:nowrap\" class=\"text-center\">'). 
\\\n        replace('<tr style=\"text-align: right;\">', '<tr style=\" white-space:nowrap\" class=\"text-center\">')\n\n    return html\n\n\nclass ChartsGallery():\n    charts = {}\n    dataframes = {}\n\n    charts_emp = {}\n    dataframes_emp = {}\n\n    loginid_id = get_loginid_id()\n\n    def initialize_chart(self, month, group):\n        df, chart = get_base_chart(month, group)\n        self.charts[(month, group)] = chart\n        self.dataframes[(month, group)] = df\n\n    def initialize_chart_emp(self, month, group, depart):\n        df, chart = get_base_chart(month, group, depart)\n        self.charts_emp[(month, group, depart)] = chart\n        self.dataframes_emp[(month, group, depart)] = df\n\n    def _get_department_auth(self, username):\n        # todo: permission check\n        pass\n\n    def get_chart_gb(self, name, month, department=None, group=None):\n        if not (month, group) in self.charts:\n            self.initialize_chart(month, group)\n        chart = deepcopy(self.charts[(month, group)])  # deepcopy so the cached base chart is not mutated\n\n        if not department:\n            indices = list(self.dataframes[(month, group)].loginid == name)\n        else:\n            indices = list(self.dataframes[(month, group)].departmentid.isin(department))\n\n        text = [self.dataframes[(month, group)].url[i] if v else None for i, v in enumerate(indices)]\n        hover_txt = [self.dataframes[(month, group)].hover_txt[i] if v else None for i, v in enumerate(indices)]\n        chart.data[-1].update({'text': text, 'hovertext': hover_txt})\n        graphJSON = json.dumps(chart, cls=PlotlyJSONEncoder)\n        return graphJSON, self.dataframes[(month, group)][indices]\n\n    def get_chart_emp(self, name, month, department=None, group=None, depart=None):\n        if not (month, group, depart) in self.charts_emp:\n            self.initialize_chart_emp(month, group, depart)\n\n        chart = deepcopy(self.charts_emp[(month, group, depart)])  # deepcopy so the cached base chart is not mutated\n\n        if not department:\n            indices = list(self.dataframes_emp[(month, group, depart)].loginid == name)\n        else:\n            indices = list(self.dataframes_emp[(month, group, depart)].departmentid.isin(department))\n\n        text = [self.dataframes_emp[(month, group, depart)].url[i] if v else None for i, v in enumerate(indices)]\n        hover_txt = [self.dataframes_emp[(month, group, depart)].hover_txt[i] if v else None for i, v in\n                     enumerate(indices)]\n        chart.data[-1].update({'text': text, 'hovertext': hover_txt})\n        graphJSON = json.dumps(chart, cls=PlotlyJSONEncoder)\n        return graphJSON, self.dataframes_emp[(month, group, depart)][indices]\n\n    def get_chart(self, name, month, department=None, group=None, sup_depart=None):\n        if group == 6:\n            graph, df = self.get_chart_gb(name, month, department, group)\n\n            table = df[TABLE_COLS]\n            table = get_html(table.rename(columns=SHOW_COLS))\n            return graph, table\n        if group == 7:\n            graph, df = self.get_chart_emp(name, month, department, group, depart=sup_depart)\n            table = df[TABLE_COLS_EMP]\n            table = get_html(table.rename(columns=SHOW_COLS_EMP))\n            return graph, table\n\n        raise ValueError\n\n    def get_chart_and_dtl(self, name, month, group=7, sup_depart=None, is_sup_perm=False, isDetails=False):\n        graph, table = self.get_chart(name, month, group=group, sup_depart=sup_depart)\n        if is_sup_perm:\n            cp_dtl = self.get_dtl_table(name, month, group, isDetails)\n        else:\n            cp_dtl = None\n\n        return graph, table, cp_dtl\n\n    def get_dtl_table(self, name, month, group, isDetails=False):\n        id = self.loginid_id[name]\n        cur_date = datetime.date(int(month[:4]), int(month[5:]), 1)\n\n        start_date = cur_date + relativedelta(day=31) + datetime.timedelta(days=1)\n        end_date = start_date + relativedelta(day=31) + datetime.timedelta(days=1)\n\n        if isDetails:\n            dtails = \"t5.lastname as txrName\"\n        else:\n            
dtails = \"'******' as txrName\"\n\n        if group == 7:\n            sql = f'''\n            -- employees\n            SELECT -- t1.txr,\n            {dtails},\n            -- t2.bsbtpr,\n            -- t4.lastname as btprName,\n            -- t4.loginid,\n            (convert(Int, s1.name) + convert(Int, s2.name) + convert(Int, s3.name)\n            + convert(Int, s4.name) + convert(Int, s5.name))\n            as total,\n            s1.name as v1,\n            s2.name as v2,\n            s3.name as v3,\n            s4.name as v4,\n            s5.name as v5\n            FROM formtable_main_130 t1\n            INNER JOIN formtable_main_130_dt1 t2 ON t2.mainid = t1.id\n\n            INNER JOIN workflow_currentoperator t3 ON t3.requestid = t1.requestId\n            INNER JOIN HrmResource t4 ON t4.id = t2.bsbtpr\n            INNER JOIN HrmResource t5 ON t5.id = t1.txr\n\n            INNER JOIN a_CpYgDepBind t6 ON t6.childId = t4.departmentid\n            INNER JOIN a_CpYgDepBind t7 ON t7.childId = t5.departmentid\n\n            INNER JOIN mode_selectitempagedetail s1 ON s1.mainid = 22 AND s1.disorder = t2.f1\n            INNER JOIN mode_selectitempagedetail s2 ON s2.mainid = 23 AND s2.disorder = t2.f2\n            INNER JOIN mode_selectitempagedetail s3 ON s3.mainid = 23 AND s3.disorder = t2.f3\n            INNER JOIN mode_selectitempagedetail s4 ON s4.mainid = 23 AND s4.disorder = t2.f4\n            INNER JOIN mode_selectitempagedetail s5 ON s5.mainid = 23 AND s5.disorder = t2.f5\n            WHERE\n            -- person being rated\n            t2.bsbtpr = {id}\n            AND t1.txrq >= '{start_date}'\n            AND t1.txrq < '{end_date}'\n            AND t6.supdepId = t7.supdepId\n\n            AND (t3.isremark = 4)\n            AND (t3.iscomplete = 1)\n\n            AND t1.txr <> t2.bsbtpr\n            -- remove duplicates\n            AND t2.id not in (\n            select MAX(t2.id) as id\n            FROM formtable_main_114 t1\n            INNER JOIN formtable_main_114_dt1 t2 ON t2.mainid = t1.id\n            INNER JOIN workflow_currentoperator t4 ON t4.requestid = t1.requestId\n            WHERE (t4.isremark = 4)\n            AND (t4.iscomplete = 1)\n            -- person being rated\n            AND bsbtpr = {id}\n            AND t1.txrq >= '{start_date}'\n            AND t1.txrq < '{end_date}'\n            GROUP BY txr, bsbtpr\n            HAVING COUNT(*) > 1)\n            '''\n        elif group == 6:\n            sql = f'''\n            SELECT -- t1.txr,\n            {dtails},\n            -- t2.bsbtpr,\n            -- t4.lastname as btprName,\n            -- t4.loginid,\n            (convert(Int, s1.name) + convert(Int, s2.name) + convert(Int, s3.name)\n            + convert(Int, s4.name) + convert(Int, s5.name) + convert(Int, s6.name) + convert(Int, s7.name)\n            + convert(Int, s8.name) + convert(Int, s9.name) + convert(Int, s10.name) + convert(Int, s11.name))\n            as total,\n            s1.name as v1,\n            s2.name as v2,\n            s3.name as v3,\n            s4.name as v4,\n            s5.name as v5,\n            s6.name as v6,\n            s7.name as v7,\n            s8.name as v8,\n            s9.name as v9,\n            s10.name as v10,\n            s11.name as v11\n            FROM formtable_main_114 t1\n            INNER JOIN formtable_main_114_dt1 t2 ON t2.mainid = t1.id\n            INNER JOIN workflow_currentoperator t3 ON t3.requestid = t1.requestId\n            INNER JOIN HrmResource t4 ON t4.id = t2.bsbtpr\n\n            INNER JOIN HrmResource t5 ON t5.id = t1.txr\n\n            INNER JOIN mode_selectitempagedetail s1 ON s1.mainid = 24 AND s1.disorder = t2.f1\n            INNER JOIN mode_selectitempagedetail s2 ON s2.mainid = 13 AND s2.disorder = t2.f2\n            INNER JOIN mode_selectitempagedetail s3 ON s3.mainid = 13 AND s3.disorder = t2.f3\n            INNER JOIN mode_selectitempagedetail s4 ON s4.mainid = 13 AND s4.disorder = t2.f4\n            INNER JOIN mode_selectitempagedetail s5 ON s5.mainid = 13 AND s5.disorder = t2.f5\n            INNER JOIN mode_selectitempagedetail s6 ON s6.mainid = 13 AND s6.disorder = t2.f6\n            INNER JOIN mode_selectitempagedetail s7 ON s7.mainid = 13 AND s7.disorder = t2.f7\n            INNER JOIN mode_selectitempagedetail s8 ON s8.mainid = 13 AND s8.disorder = t2.f8\n            INNER JOIN mode_selectitempagedetail s9 ON s9.mainid = 21 AND s9.disorder = t2.f9\n            INNER JOIN mode_selectitempagedetail s10 ON s10.mainid = 21 AND s10.disorder = t2.f10\n            INNER JOIN 
mode_selectitempagedetail s11 ON s11.mainid = 21 AND s11.disorder = t2.f11\n            WHERE (t3.isremark = 4)\n            AND (t3.iscomplete = 1)\n            -- person being rated\n            AND t2.bsbtpr = {id}\n            AND t1.txrq >= '{start_date}'\n            AND t1.txrq < '{end_date}'\n            -- remove duplicates\n            AND t2.id not in (\n            select MAX(t2.id) as id\n            FROM formtable_main_114 t1\n            INNER JOIN formtable_main_114_dt1 t2 ON t2.mainid = t1.id\n            INNER JOIN workflow_currentoperator t4 ON t4.requestid = t1.requestId\n            WHERE (t4.isremark = 4)\n            AND (t4.iscomplete = 1)\n            -- person being rated\n            AND t2.bsbtpr = {id}\n            AND t1.txrq >= '{start_date}'\n            AND t1.txrq < '{end_date}'\n            GROUP BY txr, bsbtpr\n            HAVING COUNT(*) > 1\n            )\n            AND t4.id <> t5.id\n            '''\n        else:\n            raise ValueError(group)\n        df = pd.read_sql(sql, engine)\n\n        # table = table.rename(columns=SHOW_COLS).to_html(index=False, classes='table-striped', border=0).replace(\n        #     'dataframe', 'table')\n        if group == 7:\n            table = get_html(df.rename(columns=SHOW_COLS_EMP))\n        else:\n            table = get_html(df.rename(columns=SHOW_COLS))\n        return table\n\n\ndef get_date_list():\n    df = pd.read_sql(\"select distinct years, months from cp_result\", engine)\n    df['dt'] = df.years.astype(str) + df.months.astype(str).str.pad(2, 'left', '0')\n    return list(df.dt.values)\n\n\nDEP_FRAM = {}\n\n\nclass Tree:\n    def __init__(self, id, departmentname):\n        self.id = id\n        self.departmentname = departmentname\n        self.parent = []\n        self.offspring = {}\n\n    def add_parent(self, supdepid):\n        self.parent.append(supdepid)\n\n    def add_offspring(self, id, departmentname):\n        if id not in DEP_FRAM:\n            DEP_FRAM.setdefault(id, Tree(id, departmentname))\n        self.offspring[id] = DEP_FRAM[id]\n\n    def print_offsprings(self, id=None, pre_fix=''):\n        print(pre_fix + self.departmentname)\n        pre_fix += '+'\n        for key in self.offspring:\n            self.offspring[key].print_offsprings(pre_fix=pre_fix)\n\n    def get_offsprings(self, id=None, pre_fix=''):\n        res = [(self.id, pre_fix + self.departmentname)]\n        pre_fix += '+'\n        for key in self.offspring:\n            res.extend(self.offspring[key].get_offsprings(pre_fix=pre_fix))\n        return res\n\n\ndef get_department_framework():\n    df = pd.read_sql('''\n    SELECT id,departmentname,supdepid FROM hrmdepartment\n    where canceled is null\n    ''', engine)\n    global DEP_FRAM\n    DEP_FRAM = {0: Tree(0, 'ALL')}\n\n    for i, (id, dep, sup) in df.iterrows():\n        DEP_FRAM[id] = Tree(id, dep)\n\n    for i, (id, dep, sup) in df.iterrows():\n        if sup not in DEP_FRAM: continue\n        DEP_FRAM[sup].add_offspring(id, dep)\n    return DEP_FRAM\n\n\ndef get_auth_department():\n    df = pd.read_sql(f'''\n    select loginid,departmentid from permission\n    ''', engine)\n    auth_list = df.groupby('loginid').agg(list).to_dict()['departmentid']\n    # df = df.set_index('loginid')\n    # return df.to_dict()['departmentid']\n    return auth_list\n\n\n# permission control\ndef has_auth(user, name):\n    # the same person\n    # users who hold department-level permission\n    return True\n\n\ndef get_sup_dep_loginId():\n    df = pd.read_sql('''\n    select loginid, supdepId from a_CpYgDepBind sup\n    inner join HrmResource h on sup.childId=h.departmentid\n    ''', engine)\n\n    sup_dep = {l: d for l, d in df.values}\n    return sup_dep\n\n\ndef get_dep_loginId():\n    df = pd.read_sql('''\n    select loginid, departmentid from HrmResource\n    ''', engine)\n\n    loginId_dep = {l: d for l, d in df.values}\n    return loginId_dep\n\n\ndef get_sup_dep():\n    df = pd.read_sql('''\n    select childId, supdepId from a_CpYgDepBind\n    ''', engine)\n\n    sup_dep = {l: d for l, d in df.values}\n    return sup_dep\n\n\ndef get_sup_permission():\n    df = pd.read_sql('''\n    select loginid, departmentid from sup_permission\n    ''', engine)\n\n    sup_permission = 
df.groupby('loginid').agg(list).to_dict()['departmentid']\n # sup_dep = {l:d for l,d in df.values}\n return sup_permission\n\n\ndef get_loginid_group():\n df = pd.read_sql('''\n select loginid, category from\n (select loginid, category,\n row_number() over (partition by loginid, category order by years desc, months desc) rn\n from result_all) A\n where rn = 1\n ''', engine)\n return dict(df.values)\n", "sub_path": "data_prepare/exp.py", "file_name": "exp.py", "file_ext": "py", "file_size_in_byte": 23482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 75, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 82, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 92, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 92, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 95, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 95, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 99, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 99, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 101, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 101, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 108, "usage_type": "call"}, {"api_name": "_plotly_utils.utils.PlotlyJSONEncoder", "line_number": 108, "usage_type": "name"}, {"api_name": "utils.get_cut_val", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 209, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 209, "usage_type": "argument"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 222, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 222, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 232, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 232, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 234, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 234, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 236, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 236, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 238, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 238, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 241, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 241, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 243, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 243, "usage_type": "name"}, {"api_name": "pandas.read_sql", "line_number": 255, "usage_type": "call"}, {"api_name": 
"utils.engine", "line_number": 257, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 301, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 311, "usage_type": "call"}, {"api_name": "_plotly_utils.utils.PlotlyJSONEncoder", "line_number": 311, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 318, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 329, "usage_type": "call"}, {"api_name": "_plotly_utils.utils.PlotlyJSONEncoder", "line_number": 329, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 358, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 360, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 360, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 361, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 361, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 490, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 490, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 502, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 502, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 540, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 543, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 557, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 559, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 574, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 577, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 584, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 586, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 593, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 595, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 602, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 604, "usage_type": "argument"}, {"api_name": "pandas.read_sql", "line_number": 612, "usage_type": "call"}, {"api_name": "utils.engine", "line_number": 618, "usage_type": "argument"}]} +{"seq_id": "121797952", "text": "import json\nimport os\nimport os.path as osp\nimport random\nimport torch\nfrom torch_geometric.data import Dataset\nimport rescue.dataset.data_adapter as data_adapter\n\n\nclass RescueDataset(Dataset):\n def __init__(self, root, agent_type, comp=None, scenario=None, team=None, node_classification=False,\n start_datetime=None, end_datetime=None, read_info_map=False, transform=None, pre_transform=None,\n max_cache_size=100):\n self.comp = comp\n self.scenario = scenario\n self.team = team\n self.agent_type = agent_type\n self.cache = {}\n self.max_cache_size = max_cache_size\n self.node_classification = node_classification\n self.start_datetime = start_datetime\n self.end_datetime = end_datetime\n self.read_info_map = read_info_map\n\n self.metadata = {}\n\n super(RescueDataset, self).__init__(root, transform, pre_transform)\n self.index_to_filename, self.index_to_inner_index, self.graph_count = \\\n data_adapter.create_index_lookup(self.metadata)\n\n @property\n def dataset_pattern(self):\n pattern = \"\"\n if self.comp:\n pattern += self.comp\n if self.scenario:\n pattern += \"_\" + self.scenario\n if self.team:\n pattern += \"_\" + self.team\n pattern += \"_\" + 
self.agent_type\n        return pattern\n\n    @property\n    def raw_file_names(self):\n        filenames = [filename for filename in os.listdir(self.raw_dir)\n                     if self.dataset_pattern in filename and filename[-3:] == \"zip\" and\n                     data_adapter.is_datetime_valid(self.start_datetime, self.end_datetime, filename)]\n        return sorted(filenames)\n\n    @property\n    def raw_dir(self):\n        return self.root\n\n    @property\n    def processed_file_names(self):\n        return [\"data.pt\", \"pre_filter.pt\", \"pre_transform.pt\"]\n\n    @property\n    def num_classes(self):\n        r\"\"\"The number of classes in the dataset.\"\"\"\n        # whether node is selected as target or not\n        if self.node_classification:\n            return 2\n        else:\n            data = self.get(0)\n            return data.x.size(0)\n\n    def len(self):\n        return self.graph_count\n\n    def get(self, idx):\n        if idx < 0 or idx >= self.len():  # valid indices run from 0 to len()-1\n            raise IndexError()\n\n        filename = self.index_to_filename[idx]\n        data_list = self.read_raw_dataset_file(filename)\n        data = data_list[self.index_to_inner_index[idx]]\n        if self.pre_transform is not None:\n            data = self.pre_transform(data)\n        return data\n\n    def process(self):\n        self.read_metadata()\n        # print(self.raw_file_names)\n        # print(self.dataset_pattern)\n        for filename in self.raw_file_names:\n            if filename in self.metadata:\n                # print(\"skipping:\" + filename + \" already processed.\")\n                continue\n            # print(\"Processing: \" + filename)\n            full_filename = osp.join(self.raw_dir, filename)\n            json_data = data_adapter.read_raw_json_file(full_filename)\n            num_graph = data_adapter.get_num_graph(json_data)\n            self.add_metadata(filename, \"num_graph\", num_graph)\n        self.save_metadata()\n\n    def read_raw_dataset_file(self, filename):\n        if filename in self.cache:\n            return self.cache[filename]\n\n        full_filename = osp.join(self.raw_dir, filename)\n        json_data = data_adapter.read_raw_json_file(full_filename)\n        data_list = data_adapter.create_graph_data(json_data, self.node_classification, self.read_info_map)\n        if len(self.cache) > self.max_cache_size:\n            del self.cache[random.choice(list(self.cache.keys()))]  # list() is required: a dict_keys view cannot be indexed\n        self.cache[filename] = data_list\n        return data_list\n\n    def read_metadata(self):\n        for file_name in self.raw_file_names:\n            metadata_filename = file_name.replace('.zip', '_metadata.json')\n            metadata_filename = osp.join(self.root, metadata_filename)\n            if osp.exists(metadata_filename):\n                with open(metadata_filename) as json_file:\n                    self.metadata[file_name] = json.load(json_file)\n\n    def add_metadata(self, filename, data_key, data_value):\n        if filename not in self.metadata:\n            self.metadata[filename] = {}\n        self.metadata[filename][data_key] = data_value\n\n    def save_metadata(self):\n        for filename_key in self.metadata:\n            metadata_filename = filename_key.replace('.zip', '_metadata.json')\n            with open(osp.join(self.root, metadata_filename), \"w\") as json_file:\n                json.dump(self.metadata[filename_key], json_file)\n\n\nif __name__ == \"__main__\":\n    # dataset = RescueDataset(\"/home/okan/rescuesim/rcrs-server/dataset\", \"firebrigade\", comp=\"robocup2019\",\n    #                         scenario=\"test2\", team=\"ait\", node_classification=False)\n    # print(dataset.calculate_class_distribution())\n    # # print(dataset[1001])\n    # print(len(dataset))\n    # print(dataset[10])\n    # print(dataset.num_classes)\n\n    from dataset.inmemory_rescue_dataset import InMemoryRescueDataset\n    from torch_geometric.data.dataloader import DataLoader\n    test_dataset = InMemoryRescueDataset([], node_classification=False)\n    test_dataset.load('test_dataset.pt')\n    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)\n\n    for test_data in test_loader:\n        
print(test_data)\n print(test_data.batch)\n print(test_data.batch.size())\n print(test_data.x.size(0))\n print(test_data.batch.dtype)\n batch_indexes = torch.zeros(test_data.x.size(0), dtype=torch.long)\n print(batch_indexes)", "sub_path": "rescue/dataset/rescue_dataset.py", "file_name": "rescue_dataset.py", "file_ext": "py", "file_size_in_byte": 5687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch_geometric.data.Dataset", "line_number": 10, "usage_type": "name"}, {"api_name": "rescue.dataset.data_adapter.create_index_lookup", "line_number": 29, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter", "line_number": 29, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter.is_datetime_valid", "line_number": 47, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter", "line_number": 47, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "name"}, {"api_name": "rescue.dataset.data_adapter.read_raw_json_file", "line_number": 92, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter", "line_number": 92, "usage_type": "name"}, {"api_name": "rescue.dataset.data_adapter.get_num_graph", "line_number": 93, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter", "line_number": 93, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "name"}, {"api_name": "rescue.dataset.data_adapter.read_raw_json_file", "line_number": 102, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter", "line_number": 102, "usage_type": "name"}, {"api_name": "rescue.dataset.data_adapter.create_graph_data", "line_number": 103, "usage_type": "call"}, {"api_name": "rescue.dataset.data_adapter", "line_number": 103, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "name"}, {"api_name": "json.load", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 126, "usage_type": "call"}, {"api_name": "dataset.inmemory_rescue_dataset.InMemoryRescueDataset", "line_number": 140, "usage_type": "call"}, {"api_name": "torch_geometric.data.dataloader.DataLoader", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "56209840", "text": "from ingester.difference_storage import *\nimport datetime\nimport pickle\nimport msgpack\nimport json\nfrom pympler import asizeof\nimport timeit\n\n\n\ndef get_pub_dict(url_id=None, title=None, pages=None, note=None, doi=None, abstract= None, copyright = None,\n date_published=None, volume= None, number = None):\n return{\n \"url_id\": url_id,\n \"title\":title,\n \"pages\": pages,\n \"note\": note,\n \"doi\": doi,\n \"abstract\": abstract,\n \"copyright\": copyright,\n 
\"date_published\": date_published,\n        \"volume\": volume,\n        \"number\": number,\n    }\n\n\n\ndef generate_test_data(number):\n    result = generate_diff_store(get_pub_dict(url_id=0, title=\"Hello World\",\n                                              date_published=\"1990-11-10\"))  # was the unquoted expression 1990-11-10, i.e. the integer 1969\n    for i in range(1,number+1):\n        added_values = get_pub_dict(url_id=i, title=\"Hello World\"+str(i),\n                                    date_published=\"1990-11-11\",\n                                    abstract=\"Test Text\"+str(i),\n                                    doi = str(i),\n                                    note=\"Lorem Ipsum Dolor\"+ str(i),\n                                    pages=\"11{}-22{}\".format(i,i),\n                                    volume=str(i),\n                                    number=str(i),\n                                    copyright=\"Copyright\"+ str(i))\n        insert_diff_store(added_values, result)\n\n    return result\n\n\n\nraw_data =[\n    generate_test_data(1),\n    generate_test_data(5),\n    generate_test_data(10),\n    generate_test_data(30),\n    generate_test_data(62),\n]\n\nsize_data = [asizeof.asizeof(x) for x in raw_data]\nprint(\"Raw-size\",size_data)\n\n\npacked_pickle = [pickle.dumps(x) for x in raw_data]\npacked_pickle_size =[asizeof.asizeof(pickle.dumps(x)) for x in raw_data]\nprint(\"Pickle\",packed_pickle_size)\n\n\npacked_json = [json.dumps(x) for x in raw_data]\npacked_json_size =[asizeof.asizeof(json.dumps(x)) for x in raw_data]\nprint(\"JSON\",packed_json_size)\n\npacked_mp = [msgpack.packb(x) for x in raw_data]\npacked_mp_size =[asizeof.asizeof(msgpack.packb(x)) for x in raw_data]\nprint(\"Msg Pack\", packed_mp_size)\n\n\ndef pickle_stuff(index):\n    return pickle.dumps(raw_data[index])\n\ndef json_stuff(index):\n    return json.dumps(raw_data[index])\n\ndef msg_stuff(index):\n    return msgpack.packb(raw_data[index])\n\n\ndef un_pickle_stuff(index):\n    return pickle.loads(packed_pickle[index])\n\ndef un_json_stuff(index):\n    return json.loads(packed_json[index])\n\ndef un_msg_stuff(index):\n    return msgpack.unpackb(packed_mp[index])\n\nresult_packing = {\n    \"json\":[],\n    \"pickle\":[],\n    \"msgp\":[],\n}\nfor i in range(len(raw_data)):\n    tp = timeit.Timer(\"pickle_stuff({})\".format(i), \"from analysis.serialisation import pickle_stuff\")\n    result_packing[\"pickle\"].append(tp.timeit(number=10000))\n    tj = timeit.Timer(\"json_stuff({})\".format(i), \"from analysis.serialisation import json_stuff\")\n    result_packing[\"json\"].append(tj.timeit(number=10000))\n    tm = timeit.Timer(\"msg_stuff({})\".format(i), \"from analysis.serialisation import msg_stuff\")\n    result_packing[\"msgp\"].append(tm.timeit(number=10000))\n\n\nresult_unpacking = {\n    \"json\":[],\n    \"pickle\":[],\n    \"msgp\":[],\n}\nfor i in range(len(raw_data)):\n    tp = timeit.Timer(\"un_pickle_stuff({})\".format(i), \"from analysis.serialisation import un_pickle_stuff\")\n    result_unpacking[\"pickle\"].append(tp.timeit(number=10000))\n    tj = timeit.Timer(\"un_json_stuff({})\".format(i), \"from analysis.serialisation import un_json_stuff\")\n    result_unpacking[\"json\"].append(tj.timeit(number=10000))\n    tm = timeit.Timer(\"un_msg_stuff({})\".format(i), \"from analysis.serialisation import un_msg_stuff\")\n    result_unpacking[\"msgp\"].append(tm.timeit(number=10000))\n\nprint(\"PACKING\")\nprint(result_packing)\nprint(\"UNPACKING\")\nprint(result_unpacking)", "sub_path": "analysis/serialisation.py", "file_name": "serialisation.py", "file_ext": "py", "file_size_in_byte": 3833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pympler.asizeof.asizeof", "line_number": 55, "usage_type": "call"}, {"api_name": "pympler.asizeof", "line_number": 55, "usage_type": "name"}, {"api_name": "pickle.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "pympler.asizeof.asizeof", 
"line_number": 60, "usage_type": "call"}, {"api_name": "pympler.asizeof", "line_number": 60, "usage_type": "name"}, {"api_name": "pickle.dumps", "line_number": 60, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "pympler.asizeof.asizeof", "line_number": 65, "usage_type": "call"}, {"api_name": "pympler.asizeof", "line_number": 65, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "msgpack.packb", "line_number": 68, "usage_type": "call"}, {"api_name": "pympler.asizeof.asizeof", "line_number": 69, "usage_type": "call"}, {"api_name": "pympler.asizeof", "line_number": 69, "usage_type": "name"}, {"api_name": "msgpack.packb", "line_number": 69, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "msgpack.packb", "line_number": 80, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "msgpack.unpackb", "line_number": 90, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 98, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 100, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 102, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 112, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 114, "usage_type": "call"}, {"api_name": "timeit.Timer", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "74383738", "text": "# -*- coding: utf-8 -*-\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pinterest_feed.models import Pin, PinPublishThumblr\nfrom django.contrib.auth.models import User\nimport re\nimport pytumblr\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('pin_id', nargs='+', type=int)\n\n def handle(self, *args, **options):\n c = 0\n b = 0\n for item in options['pin_id']:\n pin = Pin.objects.get(id=int(item))\n check_publish = self.create_post_tumblr(pin.text.encode('utf-8'), pin.img_url, c, b)\n if (check_publish): \n user = User.objects.get(id=1)\n publish = PinPublishThumblr(user = user, pin_item = pin)\n publish.save() \n c +=1\n b +=1\n\n def create_post_tumblr(self, title, img_url, c, b):\n client = pytumblr.TumblrRestClient(\n 'cSXsJ8y4YkGsJkhwGQPnY1RIgkXUFvTRtS7MKC7QWBoKbCpwWF',\n 'b8yO1HSR0eMgfjwpDjXrGnZsWqYkt0SiZl0Gq4ZhH9F0BVb1KA',\n 'oKm2HhFJZN5iApkTwpbOdNMhMxW8Ds5sAScP0rfO2acGP8kfC0',\n 'ipu2LoD1fGxv9shFxrMeg9RW716Y65xbJ0npuVPtslKpGppp9m'\n )\n\n\n title = re.sub(r'(http|ftp|https)://([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?', ' ', title) \n if (title == ''):\n title = ''\n \n\n if b % 2 == 0:\n post = client.create_photo('animegirlpin', state=\"queue\", tags=['anime girl',], caption=title, source=str(img_url))\n else:\n post = client.create_photo('anime2018', state=\"queue\", tags=['anime girl',], caption=title, source=str(img_url))\n\n result = 'id' in post\n return result\n\n \n ", "sub_path": "pinterest_feed/management/commands/create_post_tumblr.py", "file_name": "create_post_tumblr.py", "file_ext": "py", "file_size_in_byte": 1772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 10, "usage_type": "name"}, {"api_name": 
"pinterest_feed.models.Pin.objects.get", "line_number": 18, "usage_type": "call"}, {"api_name": "pinterest_feed.models.Pin.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pinterest_feed.models.Pin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "pinterest_feed.models.PinPublishThumblr", "line_number": 22, "usage_type": "call"}, {"api_name": "pytumblr.TumblrRestClient", "line_number": 28, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "636703210", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom utils.init_weights import init_weights, normalized_columns_initializer\nfrom core.model import Model\n\nclass A3CCnnDisMjcModel(Model):\n def __init__(self, args):\n super(A3CCnnDisMjcModel, self).__init__(args)\n # build model\n # 0. feature layers\n # Input Dim 64x64\n self.dof = args.dof\n self.action_dim = args.action_dim\n self.output_dims = self.action_dim * self.dof\n self.conv1 = nn.Conv2d(self.input_dims[0], 16, kernel_size=8, stride=4) # NOTE: for pkg=\"atari\"\n self.rl1 = nn.ReLU()\n self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)\n self.rl2 = nn.ReLU()\n self.fc_3 = nn.Linear(6*6*32, self.hidden_dim)\n self.lstm = nn.LSTMCell(self.hidden_dim, self.hidden_dim, 1)\n # 1. policy output\n self.policy_4 = nn.Linear(self.hidden_dim, self.output_dims)\n self.policy_5_list = []\n for i in range(self.dof):\n self.policy_5_list.append(nn.Softmax())\n # 2. 
value output\n self.value_4 = nn.Linear(self.hidden_dim, 1)\n\n self._reset()\n\n def _init_weights(self):\n self.apply(init_weights)\n self.policy_4.weight.data = normalized_columns_initializer(self.policy_4.weight.data, 0.01)\n self.policy_4.bias.data.fill_(0)\n self.value_4.weight.data = normalized_columns_initializer(self.value_4.weight.data, 1.0)\n self.value_4.bias.data.fill_(0)\n\n self.lstm.bias_ih.data.fill_(0)\n self.lstm.bias_hh.data.fill_(0)\n\n def forward(self, x, lstm_hidden_vb=None):\n assert self.input_dims[1] == 64\n x = x.view(x.size(0), self.input_dims[0], self.input_dims[1], self.input_dims[1])\n x = self.rl1(self.conv1(x))\n x = self.rl2(self.conv2(x))\n x = x.view(-1, 6*6*32)\n x = self.fc_3(x)\n x, c = self.lstm(x, lstm_hidden_vb)\n p = self.policy_4(x)\n p_list = []\n for i in range(self.dof):\n p_list.append(self.policy_5_list[i](p[:,i*self.action_dim : (i + 1) * self.action_dim]))\n v = self.value_4(x)\n return p_list, v, (x, c)\n", "sub_path": "core/models/a3c_cnn_dis_mjc.py", "file_name": "a3c_cnn_dis_mjc.py", "file_ext": "py", "file_size_in_byte": 2343, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "core.model.Model", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.LSTMCell", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.init_weights.init_weights", "line_number": 39, "usage_type": "argument"}, {"api_name": "utils.init_weights.normalized_columns_initializer", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.init_weights.normalized_columns_initializer", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "72840214", "text": "# This is a module dedicated to gathering, organizing and preparing tweets for sentiment analysis. \n\nfrom __future__ import unicode_literals\nfrom utils import Utils\nimport os\nimport tweepy\nimport json\nimport re\nimport spacy\nimport pandas as pd\nimport numpy as np \n\nROOT_PATH = Utils().globals.get('ROOT_PATH')\n\n\nclass Twitter_Searcher:\n \"\"\"\n Module for Winston. 
\n\n Searches twitter via tweepy in a multitude of ways, gathers tweets and saves them as json.\n Access tweet texts via Twitter_Searcher().tweets or full tweets via .tweets_full\n \"\"\"\n\n CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']\n CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']\n ACCESS_TOKEN = os.environ['TWITTER_ACCESS_TOKEN']\n ACCESS_TOKEN_SECRET = os.environ['TWITTER_ACCESS_TOKEN_SECRET']\n\n\n def __init__(self, query, count):\n self.__setup_auth__()\n self.__search__(query, count)\n self.__clean_texts__()\n self.__lemmatize__texts__()\n self.__create_set__()\n \n \n def __setup_auth__(self):\n self.auth = tweepy.OAuthHandler(Twitter_Searcher.CONSUMER_KEY, Twitter_Searcher.CONSUMER_SECRET)\n self.auth.set_access_token(Twitter_Searcher.ACCESS_TOKEN, Twitter_Searcher.ACCESS_TOKEN_SECRET)\n self.api = tweepy.API(self.auth)\n\n def __search__(self, query, count):\n \"\"\"\n Gathers a given number of tweets related to the given query\n\n query: The search term \"Tesla\", \"#art\" etc.\n count: Number of tweets to be returned.\n \"\"\"\n self.tweets_full = self.api.search(query, show_user=True, tweet_mode='extended', count=count, lang='en')\n self.tweets_full = [tweet._json for tweet in self.tweets_full]\n\n texts = [self.__check_if_retweet__(tweet) for tweet in self.tweets_full]\n ids = [self.__get_id__(tweet) for tweet in self.tweets_full]\n tweets = {'id': ids, 'text': texts}\n self.tweets = pd.DataFrame(data=tweets)\n\n def __clean_texts__(self):\n # Remove http links from texts\n self.tweets['text'] = self.tweets['text'].apply(lambda text: re.sub(r'http\\S+', '', text, flags=re.MULTILINE)) \n\n # Remove newlines\n self.tweets['text'] = self.tweets['text'].apply(lambda text: text.replace('\\n', '. '))\n\n # Remove emojis etc\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\"\n \"]+\", flags=re.UNICODE)\n emoji_pattern_2 = re.compile('[\\U00010000-\\U0010ffff]', flags=re.UNICODE)\n def emoji_remover(text):\n text = emoji_pattern.sub(r'', text)\n return emoji_pattern_2.sub(r'', text)\n\n self.tweets['text'] = self.tweets['text'].apply(emoji_remover)\n\n #Remove @ and names\n self.tweets['text'] = self.tweets['text'].apply(lambda text: re.sub(r'@\\S+', '', text, flags=re.MULTILINE))\n\n def __lemmatize__texts__(self):\n nlp = spacy.load('en_core_web_sm')\n\n def lemma(text):\n doc = nlp(text)\n sent = []\n for word in doc:\n if word.lemma_ == '-PRON-':\n sent.append(word.text)\n elif word.shape_[0] == 'X':\n sent.append(word.text)\n elif word.text[0] == '@':\n sent.append(word.text)\n else:\n sent.append(word.lemma_)\n return ' '.join(sent)\n\n self.lemmatized_tweets = self.tweets\n self.lemmatized_tweets['text'] = self.lemmatized_tweets['text'].apply(lemma)\n\n def __create_set__(self):\n self.unique_tweets = self.tweets.drop_duplicates()\n self.lemmatized_unique_tweets = self.lemmatized_tweets.drop_duplicates()\n\n def __check_if_retweet__(self, tweet):\n text = ''\n if tweet['full_text'].split()[0].lower() == 'rt':\n try:\n text = tweet['retweeted_status']['full_text']\n except:\n pass\n else:\n text = tweet['full_text']\n\n return text\n \n def __get_id__(self, tweet):\n id_nr = ''\n if tweet['full_text'].split()[0].lower() == 'rt':\n try:\n id_nr = tweet['retweeted_status']['id']\n except:\n pass\n else:\n id_nr = tweet['id']\n\n return id_nr\n\n def __get_test_tweets__(self):\n self.tweets_full 
= json.load(open(ROOT_PATH + 'abilities\\\\finance\\\\tweet_examples.json'))\n return self.tweets_full\n \n def __save_tweets_to_file__(self, file_name='output.json'):\n with open(ROOT_PATH + 'abilities\\\\finance\\\\twitter_searcher_files\\\\' + file_name, 'w') as file:\n json.dump(self.tweets_full, fp=file)\n\n def print_texts(self):\n index = 1\n for text in self.tweets['text']:\n print(f'=================== {index} ======================')\n print(text)\n print('')\n index += 1\n\n def print_unique_texts(self):\n index = 1\n for text in self.unique_tweets:\n print(f'=================== {index} ======================')\n print(text)\n print('')\n index += 1\n\n", "sub_path": "python/twitter_searcher.py", "file_name": "twitter_searcher.py", "file_ext": "py", "file_size_in_byte": 5329, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "utils.Utils", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tweepy.OAuthHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 66, "usage_type": "call"}, {"api_name": "re.UNICODE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 72, "usage_type": "call"}, {"api_name": "re.UNICODE", "line_number": 72, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 80, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 80, "usage_type": "attribute"}, {"api_name": "spacy.load", "line_number": 83, "usage_type": "call"}, {"api_name": "json.load", "line_number": 131, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "427430187", "text": "# -*- coding: UTF-8 -*-\n# File: prefetch.py\n# Author: Yuxin Wu <ppwwyyxx@gmail.com>\n\nimport multiprocessing\n\nfrom .base import DataFlow\nfrom ..utils.concurrency import ensure_procs_terminate\n\n__all__ = ['PrefetchData']\n\nclass Sentinel:\n pass\n\nclass PrefetchProcess(multiprocessing.Process):\n def __init__(self, ds, queue):\n \"\"\"\n ds: ds to take data from\n queue: output queue to put results in\n \"\"\"\n super(PrefetchProcess, self).__init__()\n self.ds = ds\n self.queue = queue\n\n def run(self):\n self.ds.reset_state()\n try:\n for dp in self.ds.get_data():\n self.queue.put(dp)\n finally:\n self.queue.put(Sentinel())\n\n\nclass PrefetchData(DataFlow):\n def __init__(self, ds, nr_prefetch, nr_proc=1):\n \"\"\"\n use multiprocess\n \"\"\"\n self.ds = ds\n self._size = self.ds.size()\n self.nr_proc = nr_proc\n self.nr_prefetch = nr_prefetch\n\n def size(self):\n return self._size\n\n def get_data(self):\n queue = multiprocessing.Queue(self.nr_prefetch)\n procs = [PrefetchProcess(self.ds, queue) for _ in range(self.nr_proc)]\n ensure_procs_terminate(procs)\n [x.start() for x in procs]\n\n end_cnt = 0\n tot_cnt = 0\n try:\n while True:\n dp = queue.get()\n if isinstance(dp, Sentinel):\n end_cnt += 1\n if end_cnt == 
self.nr_proc:\n break\n continue\n tot_cnt += 1\n yield dp\n if tot_cnt == self._size:\n break\n finally:\n queue.close()\n [x.terminate() for x in procs]\n\n", "sub_path": "tensorpack/dataflow/prefetch.py", "file_name": "prefetch.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "multiprocessing.Process", "line_number": 15, "usage_type": "attribute"}, {"api_name": "base.DataFlow", "line_number": 34, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.concurrency.ensure_procs_terminate", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "351229931", "text": "#author;R.Kunimoto, TAKENAKA co.\r\n#coding:utf-8\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\npath = \"C:\\\\Users\\\\1500570\\\\Documents\\\\R\\\\WS\\\\dataset_a4\"\r\nfn = \"data1_his.csv\"\r\n\r\n\r\nep = 0\r\nff = True\r\nlist = []\r\nwhile ep < 9:\r\n print(str(ep)+\"th starting\")\r\n \"\"\"\r\n x = 100 + 15 * np.random.randn(10000)\r\n print(x)\r\n \"\"\"\r\n file = csv.reader(open(path+\"\\\\\"+fn,\"r\"))\r\n x = np.array([])\r\n for line in file:\r\n if ff == True:\r\n label_y = str(line[ep])\r\n print(label_y)\r\n ff = False\r\n continue\r\n else:\r\n x = np.append(x,float(line[ep]))\r\n print(x)\r\n \"\"\"\r\n mu, sigma = 100, 15\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1,1,1)\r\n\r\n ax.hist(x, bins=50)\r\n ax.set_title('first histogram $\\mu=100,\\ \\sigma=15$')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel(label_y)\r\n fig.save(path+\"\\\\sample\"+str(ep)+\".jpg\")\r\n \"\"\"\r\n vari = np.var(x)\r\n avr = np.average(x)\r\n y = 1 / np.sqrt(2 * np.pi * vari ) * np.exp(-(x - avr) ** 2 / (2 * vari))\r\n plt.subplot(3,3,ep+1)\r\n plt.hist(x)\r\n # plt.plot(x,y)\r\n plt.title(label_y,fontsize=7)\r\n plt.xlabel(\"fluc\", fontsize=7)\r\n plt.ylabel(\"num_seq\", fontsize=7)\r\n plt.tick_params(labelsize=7)\r\n ff = True\r\n print(str(ep)+\"th ending\")\r\n ep += 1\r\nplt.savefig(path+\"\\\\sample\"+str(ep)+\".png\")", "sub_path": "a4_hist.py", "file_name": "a4_hist.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "csv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.ylabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "530582655", "text": "from flask import abort, current_app, flash, redirect, request, Blueprint, jsonify, url_for\nfrom flask_restful import Api, Resource, reqparse\nfrom flask_login import current_user\nfrom opsy.auth.access import permissions\nfrom opsy.auth.models import User, Role\nfrom opsy.exceptions import DuplicateError\n\n\ncore_api = Blueprint('core_api', __name__, url_prefix='/api') # pylint: disable=invalid-name\napi = Api(core_api) # pylint: disable=invalid-name\n\n\nclass Login(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('username', required=True, location=['form', 'json'])\n self.reqparse.add_argument('password', required=True, location=['form', 'json'])\n self.reqparse.add_argument('remember_me', type=bool, location='form')\n self.reqparse.add_argument('force_renew', type=bool, location='json')\n super().__init__()\n\n def get(self): # pylint: disable=no-self-use\n if not permissions.get('logged_in').can():\n abort(401)\n return jsonify(current_user.get_session_token(current_app))\n\n def post(self):\n args = self.reqparse.parse_args()\n token = User.login(current_app, args['username'], args['password'],\n remember=args['remember_me'])\n if token:\n if request.is_json:\n return jsonify(token)\n return redirect(url_for('core_main.about'))\n elif request.is_json:\n abort(401, 'Username or password incorrect.')\n else:\n flash('Username or password incorrect.')\n return redirect(url_for('core_main.about'))\n\n\nclass Logout(Resource):\n\n def get(self): # pylint: disable=no-self-use\n current_user.logout(current_app)\n return redirect(url_for('core_main.about'))\n\n\nclass RolesAPI(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('name')\n self.reqparse.add_argument('ldap_group')\n self.reqparse.add_argument('description')\n super().__init__()\n\n def post(self):\n self.reqparse.replace_argument('name', required=True)\n args = self.reqparse.parse_args()\n if not permissions.get('roles_create').can():\n abort(403)\n try:\n role = Role.create(**args)\n except (DuplicateError, ValueError) as error:\n abort(400, str(error))\n return jsonify({'roles': [role.get_dict()]})\n\n def get(self):\n args = self.reqparse.parse_args()\n if not permissions.get('roles_read').can():\n abort(403)\n roles = Role.query.wtfilter_by(prune_none_values=True,\n **args).all_dict_out()\n return jsonify({'roles': roles})\n\n\nclass RoleAPI(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super().__init__()\n\n def get(self, role_name): # pylint: disable=no-self-use\n role = Role.query.wtfilter_by(name=role_name).first()\n if role and permissions.get('roles_read').can():\n return jsonify({'roles': [role.get_dict()]})\n abort(403)\n\n def patch(self, role_name):\n if not permissions.get('roles_update').can():\n abort(403)\n self.reqparse.add_argument('name')\n self.reqparse.add_argument('ldap_group')\n self.reqparse.add_argument('description')\n args = 
self.reqparse.parse_args()\n role = Role.query.wtfilter_by(name=role_name).first()\n if not role:\n abort(404)\n role.update(prune_none_values=True, **args)\n return jsonify({'roles': [role.get_dict()]})\n\n def delete(self, role_name): # pylint: disable=no-self-use\n if not permissions.get('roles_delete').can():\n abort(403)\n role = Role.query.wtfilter_by(name=role_name).first()\n if not role:\n abort(404)\n role.delete()\n return ('', 202)\n\n\nclass UsersAPI(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('name')\n self.reqparse.add_argument('full_name')\n self.reqparse.add_argument('email')\n self.reqparse.add_argument('enabled')\n super().__init__()\n\n def post(self):\n if not permissions.get('users_create').can():\n abort(403)\n self.reqparse.replace_argument('name', required=True)\n args = self.reqparse.parse_args()\n try:\n user = User.create(**args)\n except (DuplicateError, ValueError) as error:\n abort(400, str(error))\n return jsonify({'users': [user.get_dict()]})\n\n def get(self):\n if not permissions.get('users_read').can():\n abort(403)\n args = self.reqparse.parse_args()\n users = User.query.wtfilter_by(prune_none_values=True, **args).all_dict_out()\n return jsonify({'users': users})\n\n\nclass UserAPI(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super().__init__()\n\n def get(self, user_name): # pylint: disable=no-self-use\n user = User.query.wtfilter_by(name=user_name).first()\n if user and permissions.get('user_read')(user.id).can():\n return jsonify({'users': [user.get_dict()]})\n abort(403)\n\n def patch(self, user_name):\n self.reqparse.add_argument('full_name')\n self.reqparse.add_argument('email')\n self.reqparse.add_argument('enabled')\n args = self.reqparse.parse_args()\n user = User.query.wtfilter_by(name=user_name).first()\n if not (user and permissions.get('user_update')(user.id).can()):\n abort(403)\n user.update(prune_none_values=True, **args)\n return jsonify({'users': [user.get_dict()]})\n\n def delete(self, user_name): # pylint: disable=no-self-use\n if not permissions.get('users_delete').can():\n abort(403)\n user = User.query.wtfilter_by(name=user_name).first()\n if not user:\n abort(404)\n user.delete()\n return ('', 202)\n\n\nclass UserSettingsAPI(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('key')\n self.reqparse.add_argument('value')\n super().__init__()\n\n def post(self, user_name):\n self.reqparse.replace_argument('key', required=True)\n self.reqparse.replace_argument('value', required=True)\n args = self.reqparse.parse_args()\n user = User.query.wtfilter_by(name=user_name).first()\n if not (user and permissions.get('user_update')(user.id).can()):\n abort(403)\n try:\n setting = user.add_setting(args['key'], args['value'])\n except DuplicateError as error:\n abort(400, str(error))\n return jsonify({'settings': [setting.get_dict()]})\n\n def get(self, user_name):\n user = User.query.wtfilter_by(name=user_name).first()\n if not (user and permissions.get('user_read')(user.id).can()):\n abort(403)\n return jsonify({'settings': [x.get_dict() for x in user.settings]})\n\n\nclass UserSettingAPI(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('value', required=True, location='json')\n super().__init__()\n\n def patch(self, user_name, setting_key):\n args = self.reqparse.parse_args()\n user = User.query.wtfilter_by(name=user_name).first()\n 
if not (user and permissions.get('user_update')(user.id).can()):\n abort(403)\n try:\n setting = user.modify_setting(setting_key, args['value'])\n except ValueError as error:\n abort(404, str(error))\n return jsonify({'settings': [setting.get_dict()]})\n\n def get(self, user_name, setting_key):\n user = User.query.wtfilter_by(name=user_name).first()\n if not (user and permissions.get('user_read')(user.id).can()):\n abort(403)\n try:\n setting = user.get_setting(setting_key, error_on_none=True)\n except ValueError as error:\n abort(404, str(error))\n return jsonify({'settings': [setting.get_dict()]})\n\n def delete(self, user_name, setting_key):\n user = User.query.wtfilter_by(name=user_name).first()\n if not (user and permissions.get('user_update')(user.id).can()):\n abort(403)\n try:\n user.remove_setting(setting_key)\n except ValueError as error:\n abort(404, str(error))\n return ('', 202)\n\n\napi.add_resource(\n Login, '/login',\n endpoint='login')\napi.add_resource(\n Logout, '/logout',\n endpoint='logout')\napi.add_resource(\n RolesAPI, '/roles',\n endpoint='roles')\napi.add_resource(\n RoleAPI, '/roles/<role_name>',\n endpoint='role')\napi.add_resource(\n UsersAPI, '/users',\n endpoint='users')\napi.add_resource(\n UserAPI, '/users/<user_name>',\n endpoint='user')\napi.add_resource(\n UserSettingsAPI, '/users/<user_name>/settings',\n endpoint='user_settings')\napi.add_resource(\n UserSettingAPI, '/users/<user_name>/settings/<setting_key>',\n endpoint='user_setting')\n", "sub_path": "opsy/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 9228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 13, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 16, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 24, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_login.current_user.get_session_token", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 26, "usage_type": "argument"}, {"api_name": "flask_login.current_user", "line_number": 26, "usage_type": "name"}, {"api_name": "opsy.auth.models.User.login", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 30, "usage_type": "argument"}, {"api_name": "opsy.auth.models.User", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.is_json", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 37, "usage_type": "call"}, 
{"api_name": "flask.flash", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 40, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 43, "usage_type": "name"}, {"api_name": "flask_login.current_user.logout", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 46, "usage_type": "argument"}, {"api_name": "flask_login.current_user", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 50, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 53, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 53, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 62, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 63, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.create", "line_number": 65, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role", "line_number": 65, "usage_type": "name"}, {"api_name": "opsy.exceptions.DuplicateError", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 68, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 72, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 73, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query.wtfilter_by", "line_number": 74, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query", "line_number": 74, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.Role", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 76, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 79, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 82, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 82, "usage_type": "name"}, {"api_name": "opsy.auth.models.Role.query.wtfilter_by", "line_number": 86, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query", "line_number": 86, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.Role", "line_number": 86, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 87, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 89, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 92, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 93, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query.wtfilter_by", "line_number": 98, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query", "line_number": 98, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.Role", "line_number": 98, "usage_type": "name"}, 
{"api_name": "flask.abort", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 102, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 105, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 106, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query.wtfilter_by", "line_number": 107, "usage_type": "call"}, {"api_name": "opsy.auth.models.Role.query", "line_number": 107, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.Role", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 109, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 114, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 117, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 117, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 125, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 125, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 126, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.create", "line_number": 130, "usage_type": "call"}, {"api_name": "opsy.auth.models.User", "line_number": 130, "usage_type": "name"}, {"api_name": "opsy.exceptions.DuplicateError", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 133, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 136, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 137, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 139, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 139, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 140, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 143, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 146, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 146, "usage_type": "name"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 150, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 150, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 150, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 151, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 151, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 153, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 160, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 160, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 160, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 161, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 161, "usage_type": 
"name"}, {"api_name": "flask.abort", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 164, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 167, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 168, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 169, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 169, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 169, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 171, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 176, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 179, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 179, "usage_type": "name"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 188, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 188, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 188, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 189, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 190, "usage_type": "call"}, {"api_name": "opsy.exceptions.DuplicateError", "line_number": 193, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 195, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 198, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 198, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 198, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 199, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 199, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 200, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 201, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 204, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 207, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 207, "usage_type": "name"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 213, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 213, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 213, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 214, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 215, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 220, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 223, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 223, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 223, "usage_type": 
"name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 224, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 224, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 229, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 230, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query.wtfilter_by", "line_number": 233, "usage_type": "call"}, {"api_name": "opsy.auth.models.User.query", "line_number": 233, "usage_type": "attribute"}, {"api_name": "opsy.auth.models.User", "line_number": 233, "usage_type": "name"}, {"api_name": "opsy.auth.access.permissions.get", "line_number": 234, "usage_type": "call"}, {"api_name": "opsy.auth.access.permissions", "line_number": 234, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 235, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 239, "usage_type": "call"}]} +{"seq_id": "356533190", "text": "\nimport unittest\nimport requests\nfrom publicdata.census.files.url_templates import *\nfrom publicdata.census.files.metafiles import Table, Column, TableShell, TableLookup, TableMeta\nfrom publicdata.census.files.generators import SequenceFile, GeoFile, Table as TableGenerator\nfrom rowgenerators import parse_app_url\nfrom itertools import islice\nimport csv\nimport logging\n\n\nclass TestGenerators(unittest.TestCase):\n\n def test_tableshell(self):\n\n ts = TableShell(2016, 1)\n\n ts._process()\n\n self.assertEqual(1319, len(ts.tables))\n\n self.assertEqual(['b15002h', 'c15002h', 'b15002i', 'c15002i', 'b15003',\n 'c15003', 'b15010', 'c15010', 'c15010a', 'c15010b'],\n list(ts.tables.keys())[500:510])\n\n self.assertEqual(ts.tables['c16004'].title,\n 'AGE BY LANGUAGE SPOKEN AT HOME BY ABILITY TO SPEAK ENGLISH FOR THE POPULATION 5 YEARS AND OVER')\n\n with open('/tmp/tables_shell.csv', 'w') as f:\n w = csv.writer(f)\n\n w.writerow(Table.csv_header)\n w.writerows(t.row for t in ts.tables.values())\n\n def test_tablelookup(self):\n\n ts = TableLookup(2016, 1)\n\n ts._process()\n\n self.assertEqual(1310, len(ts.tables))\n\n self.assertEqual(['c15002d', 'c15002e', 'c15002f', 'c15002g', 'c15002h',\n 'c15002i', 'c15003', 'c15010', 'c15010a', 'b15011'],\n list(ts.tables.keys())[500:510])\n\n self.assertEqual(ts.tables['c16004'].title,\n 'Age By Language Spoken At Home By Ability To Speak English For The Population 5 Years And Over')\n\n with open('/tmp/tables_lookup.csv', 'w') as f:\n w = csv.writer(f)\n\n w.writerow(Table.csv_header)\n w.writerows(t.row for t in ts.tables.values())\n\n def test_tablemeta(self):\n tm = TableMeta(2016, 1)\n\n tm._process()\n\n self.assertEqual(1319, len(tm.tables))\n\n self.assertEqual(['b15002h', 'c15002h', 'b15002i', 'c15002i',\n 'b15003', 'c15003', 'b15010', 'c15010', 'c15010a', 'c15010b'],\n list(tm.tables.keys())[500:510])\n\n self.assertEqual(tm.tables['c16004'].title,\n 'AGE BY LANGUAGE SPOKEN AT HOME BY ABILITY TO SPEAK ENGLISH FOR THE POPULATION 5 YEARS AND OVER')\n\n with open('/tmp/tables_meta.csv', 'w') as f:\n w = csv.writer(f)\n\n w.writerow(Table.csv_header)\n w.writerows(t.row for t in tm.tables.values())\n\n\n with open('/tmp/columns_meta.csv', 'w') as f:\n w = csv.writer(f)\n\n w.writerow(Column.csv_header)\n\n for t in tm.tables.values():\n for cn in sorted(t.columns):\n c = t.columns[cn]\n w.writerow(c.row )\n\n\n\n def test_geo(self):\n\n tm = GeoFile(2016, 5, 'RI', 140, 1)\n\n for row in islice(tm,10):\n print(row)\n\n def 
test_table(self):\n import geoid.core\n\n tm = TableGenerator(2016, 5, 'CA', geoid.core.names['tract'], 'B01001')\n\n tracts = list(tm)\n self.assertEqual(8058, len(tracts))\n\n lens = [len(row) for row in tracts]\n\n self.assertTrue(all(x == lens[0] for x in lens))\n\n rows = list(islice(tm,5))\n\n self.assertEqual(('GEOID', 'B01001_001_m90', 'B01001_004', 'B01001_006_m90', 'B01001_009',\n 'B01001_011_m90', 'B01001_014', 'B01001_016_m90', 'B01001_019', 'B01001_021_m90',\n 'B01001_024', 'B01001_026_m90', 'B01001_029', 'B01001_031_m90', 'B01001_034',\n 'B01001_036_m90', 'B01001_039', 'B01001_041_m90', 'B01001_044', 'B01001_046_m90',\n 'B01001_049'),\n rows[0][::5])\n\n\n self.assertEqual(('14000US06001400100', 'CA', '001', 'Census Tract 4001, Alameda County, California',\n 3018, 195),\n rows[1][:6])\n\n # Checksum a few rows\n self.assertEqual(6561, sum(rows[1][7:]))\n self.assertEqual(9061, sum(rows[4][7:]))\n\n def test_appurl(self):\n\n from rowgenerators import parse_app_url\n\n u = parse_app_url('census://2016/5/RI/140/B17001')\n\n rows = list(u.generator)\n\n self.assertEqual(245,len(rows))\n\n def test_appurl_US(self):\n from rowgenerators import parse_app_url\n from rowgenerators.appurl.web.download import logger as download_logger\n from publicdata.census.files import logger\n\n logging.basicConfig()\n\n logger.setLevel(logging.DEBUG)\n\n u = parse_app_url('census://2016/5/US/50/B17001')\n\n rows = list(u.generator)\n\n self.assertEqual(3272,len(rows))\n\n def test_sequence(self):\n\n sf = SequenceFile(2016,5,'RI',140, 3 )\n\n h, f, m = list(zip(sf.file_headers, sf.descriptions, sf.meta))[60]\n\n self.assertEqual('B01001G_028', h)\n self.assertEqual('SEX BY AGE (TWO OR MORE RACES) for People Who Are Two Or More Races% Female:% 55 to 64 years',\n f)\n\n for h,f,m in list(zip(sf.file_headers, sf.descriptions, sf.meta)):\n self.assertEqual(h, m.unique_id)\n\n def test_dataframe(self):\n from publicdata.census.files.appurl import CensusFile\n from rowgenerators import parse_app_url\n\n u = parse_app_url('census://2016/5/RI/140/B01002')\n\n print(type(u))\n\n g = u.generator\n\n rows = list(g)\n\n self.assertEqual(245,len(rows))\n\n df = u.generator.dataframe()\n\n self.assertEqual(9708, int(df['B01002_001'].sum()))\n self.assertEqual(809, int(df['B01002_001_m90'].sum()))\n self.assertEqual(9375, int(df['B01002_002'].sum()))\n self.assertEqual(1171, int(df['B01002_002_m90'].sum()))\n\n def test_geo_dataframe(self):\n\n u = parse_app_url('census://2016/5/RI/140/B01002')\n\n gdf = u.generator.geoframe\n\n print(gdf.head())\n print(gdf.geometry.head())\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "publicdata/census/files/test/test_generators.py", "file_name": "test_generators.py", "file_ext": "py", "file_size_in_byte": 6087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "publicdata.census.files.metafiles.TableShell", "line_number": 17, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 31, "usage_type": "call"}, {"api_name": "publicdata.census.files.metafiles.Table.csv_header", "line_number": 33, "usage_type": "attribute"}, {"api_name": "publicdata.census.files.metafiles.Table", "line_number": 33, "usage_type": "name"}, {"api_name": "publicdata.census.files.metafiles.TableLookup", "line_number": 38, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 52, "usage_type": "call"}, {"api_name": 
"publicdata.census.files.metafiles.Table.csv_header", "line_number": 54, "usage_type": "attribute"}, {"api_name": "publicdata.census.files.metafiles.Table", "line_number": 54, "usage_type": "name"}, {"api_name": "publicdata.census.files.metafiles.TableMeta", "line_number": 58, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 72, "usage_type": "call"}, {"api_name": "publicdata.census.files.metafiles.Table.csv_header", "line_number": 74, "usage_type": "attribute"}, {"api_name": "publicdata.census.files.metafiles.Table", "line_number": 74, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 79, "usage_type": "call"}, {"api_name": "publicdata.census.files.metafiles.Column.csv_header", "line_number": 81, "usage_type": "attribute"}, {"api_name": "publicdata.census.files.metafiles.Column", "line_number": 81, "usage_type": "name"}, {"api_name": "publicdata.census.files.generators.GeoFile", "line_number": 92, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 94, "usage_type": "call"}, {"api_name": "publicdata.census.files.generators.Table", "line_number": 100, "usage_type": "call"}, {"api_name": "geoid.core.core", "line_number": 100, "usage_type": "attribute"}, {"api_name": "geoid.core", "line_number": 100, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 109, "usage_type": "call"}, {"api_name": "rowgenerators.parse_app_url", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 142, "usage_type": "call"}, {"api_name": "publicdata.census.files.logger.setLevel", "line_number": 144, "usage_type": "call"}, {"api_name": "publicdata.census.files.logger", "line_number": 144, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 144, "usage_type": "attribute"}, {"api_name": "rowgenerators.parse_app_url", "line_number": 146, "usage_type": "call"}, {"api_name": "publicdata.census.files.generators.SequenceFile", "line_number": 154, "usage_type": "call"}, {"api_name": "rowgenerators.parse_app_url", "line_number": 169, "usage_type": "call"}, {"api_name": "rowgenerators.parse_app_url", "line_number": 188, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "467994768", "text": "# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# Modified by Mangal Bhaskar\n# --------------------------------------------------------\n\nimport os.path as osp\nimport cv2\nfrom utils.timer import Timer\nimport numpy as np\nfrom fast_rcnn.test import im_detect\nfrom fast_rcnn.nms_wrapper import nms\nfrom fast_rcnn.config import cfg\nimport caffe\n\nimport pixel.Util as Util\n\n\n## TBD:\n## Throws:\n## 1. WARNING: Logging before InitGoogleLogging() is written to STDERR\n## 2. [libprotobuf WARNING google/protobuf/io/coded_stream.cc:604] Reading dangerously large protocol message. If the message turns out to be larger than 2147483647 bytes, parsing will be halted for security reasons. 
To increase the limit (or to disable these warnings), see CodedInputStream::SetTotalBytesLimit() in google/protobuf/io/coded_stream.h.\n## [libprotobuf WARNING google/protobuf/io/coded_stream.cc:81] The total number of bytes read was 546762597\n## @API function\ndef loadModel(modelDtls, args):\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n\n if args.MODE == 'cpu':\n caffe.set_mode_cpu()\n else:\n caffe.set_mode_gpu()\n caffe.set_device(args.GPU_ID)\n cfg.GPU_ID = args.GPU_ID\n\n ## when getting it through the web, it comes as unicode and hence str() funct is used to convert into normal string\n prototxt = str(modelDtls[\"prototxt_test\"])\n weights = str(modelDtls[\"weights\"])\n print(\"++++++++++++++++++++++++\")\n print(\"loading Model Details:\")\n print(\"prototxt:\")\n print(prototxt)\n print(type(prototxt))\n print(\"weights:\")\n print(weights)\n print(\"modelDtls:\")\n print(modelDtls)\n print(\"++++++++++++++++++++++++\")\n if not osp.isfile(weights):\n raise IOError(('{:s} not found.\\nDid you run ./data/script/'\n 'fetch_faster_rcnn_models.sh?').format(weights))\n net = caffe.Net(prototxt, weights, caffe.TEST)\n\n print('\\n\\nLoaded network {:s}'.format(weights))\n return net\n\n\ndef warmup(net):\n # Warmup on a dummy image\n im = 128 * np.ones((300, 500, 3), dtype=np.uint8)\n for i in xrange(2):\n _, _= im_detect(net, im)\n\n\n## TBD: put NMS_THRESH, CONF_THRESH and any other architecture specific\n## parameters to the configuration optin\n## Create the actual createResponseForVisionAPI\n## @API function\ndef predict(modelDtls, net, im_name, path, out_file, __appcfg):\n print(\"Inside {}: predict()\".format(__file__))\n # Load the image\n im_file = osp.join(path, im_name)\n\n print('im_name: '+im_name)\n print('im_file: '+im_file)\n im = cv2.imread(im_file)\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n # print scores,boxes\n\n ## OpenCV read the images in BGR format R=0,G=1,B=2\n ## hence, when plotting with matplotlib specify the order\n im = im[:, :, (2, 1, 0)]\n\n modelCfg = modelDtls[\"config\"]\n\n ## Ref: https://stackoverflow.com/questions/34768717/matplotlib-unable-to-save-image-in-same-resolution-as-original-image\n dim = im.shape[:2]\n height, width = dim[0], dim[1]\n FILE_DELIMITER = __appcfg.FILE_DELIMITER\n\n timer.toc()\n #print(np.amax(scores, axis=1))\n #print('Detection took {:.3f}s for {:d} object proposals').format(timer.total_time, boxes.shape[0])\n \n CONF_THRESH = modelCfg.CONF_THRESH\n NMS_THRESH = modelCfg.NMS_THRESH\n # Visualize detections for each class\n CLASSES = modelDtls[\"CLASSES\"]\n \n # print(\"CLASSES, NMS_THRESH: \"+CLASSES+\",\"+NMS_THRESH)\n\n all_rows_for_all_classes = {}\n # all_labels = []\n labelNames = enumerate(CLASSES[1:]);\n # print(\"Label Names: {}\").format(CLASSES[1:])\n\n for cls_ind, cls in labelNames:\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n all_rows = getDetections(width, height, cls, dets, im_name, out_file, CONF_THRESH, FILE_DELIMITER, __appcfg)\n\n # all_labels.append(cls)\n if all_rows and len(all_rows) > 0:\n if all_rows[\"bbox\"] and len(all_rows[\"bbox\"]) > 0:\n all_rows_for_all_classes[cls] = all_rows\n else:\n all_rows_for_all_classes[cls] = None\n\n detections = [ Util.getVizImageFileName(im_name, None, 
__appcfg ) ]\n # print(\"faster_rcnn_end2end::detections: {}\".format(detections))\n res = Util.createResponseForVisionAPI(im_name, FILE_DELIMITER, __appcfg, all_rows_for_all_classes, detections, __appcfg.API_VISION_BASE_URL)\n return res\n\n\ndef getDetections(width, height, class_name, dets, im_name, out_file, CONF_THRESH, FILE_DELIMITER, __appcfg):\n print(\"getDetections\")\n row = None;\n\n all_bbox = []\n\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n print(len(inds))\n\n fileName = Util.getOutFileName(out_file, im_name, \".csv\", __appcfg)\n\n with open(fileName,'a') as f:\n if len(inds) == 0:\n if __appcfg.SAVE_NULL_RESULTS:\n row = Util.getOutFileRow([], class_name, \"null\", width, height, FILE_DELIMITER)\n f.write(row+'\\n') \n else:\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n \n # mask_rcnn: getOutFileRow: bbox:: image coordinates\n # [ 306 23 1080 1920] => [y1,x1,y2,x2] => [top, left, bottom, right] mapping in Util.getOutFileRow\n\n # faster_rcnn_end2end: getOutFileRow::bbox:\n # [643.95715 105.885155 717.3395 177.24414 ] => [left, top, right, bottom] => [x1,y1,x2,y2]\n\n # row = Util.getOutFileRow(bbox, class_name, score, width, height, FILE_DELIMITER)\n row = Util.getOutFileRow([bbox[1],bbox[0],bbox[3],bbox[2]], class_name, score, width, height, FILE_DELIMITER)\n print(\"row:\")\n print(row)\n ## TBD: type conversion mapping\n all_bbox.append(row.split(FILE_DELIMITER))\n print(\"Detection Row:\"+row)\n f.write(row+'\\n')\n\n all_rows = {\n \"bbox\":all_bbox\n }\n return all_rows\n\n\n## TBD: not sure if it's deprecated and is it retained here for legacy reasons\n## and if it's used in the pixel app workflow\n## @API function\ndef vis_detections(im, class_name, dets, im_name, out_file, CONF_THRESH, __appcfg):\n ## Deprecated (Original function)\n ## Does too many things: aggregating detections, writing it to a file, visualizing detections on image (annotate image)\n print(\"vis_detections\")\n row = None\n all_rows = []\n\n # CONF_THRESH = __appcfg.CONF_THRESH\n FILE_DELIMITER = __appcfg.FILE_DELIMITER\n\n ## OpenCV read the images in BGR format R=0,G=1,B=2\n ## hence, when plotting with matplotlib specify the order\n im = im[:, :, (2, 1, 0)]\n\n ## Ref: https://stackoverflow.com/questions/34768717/matplotlib-unable-to-save-image-in-same-resolution-as-original-image\n dim = im.shape[:2]\n height, width = dim[0], dim[1]\n dpi = 80\n figsize = width/float(dpi), height/float(dpi)\n print(\"figsize: \")\n print(figsize)\n\n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n print(len(inds))\n\n if len(inds) == 0:\n if __appcfg.SAVE_NULL_RESULTS:\n row = [str(im_name)+FILE_DELIMITER+str(width)+FILE_DELIMITER+str(height)+FILE_DELIMITER+\"null\"+FILE_DELIMITER+\"null\"+FILE_DELIMITER+\"null\"+FILE_DELIMITER+\"null\"+FILE_DELIMITER+str(class_name)+FILE_DELIMITER+\"null\"]\n\n fig, ax = plt.subplots(figsize=figsize)\n ax.imshow(im, aspect='equal')\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n pltname = im_name\n print('pltname: '+pltname)\n\n fileName = os.path.join(os.path.dirname( out_file ),'../logs-nodetections', pltname)\n print('fileName to be saved: '+fileName)\n ax.set(xlim=[0, width], ylim=[height, 0], aspect=1)\n\n try:\n plt.savefig(fileName, dpi=dpi, transparent=True)\n except Exception as e:\n print(\"Error: \")\n print(e)\n finally:\n plt.close()\n print(\"NULL Results Saved\")\n \n return row\n return row\n \n # ax = plt.subplots(figsize=(12, 12))\n fig, ax = plt.subplots(figsize=figsize)\n 
ax.imshow(im, aspect='equal')\n \n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n left = bbox[0]\n top = bbox[1]\n right = bbox[2]\n bottom = bbox[3]\n\n # ax.add_patch(plt.Rectangle((bbox[0], bbox[1]),bbox[2] - bbox[0],bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=1.5))\n ax.add_patch(plt.Rectangle((left, top), (right - left), (bottom - top), fill=False, edgecolor='red', linewidth=1.5))\n # ax.text(left, top - 3,' {:.3f}'.format(score),bbox=dict(facecolor='blue', alpha=0.5),fontsize=14, color='white')\n # ax.text(bbox[0], bbox[1] - 2,'{:s} {:.3f}'.format(class_name),bbox=dict(facecolor='blue', alpha=0.5),fontsize=14, color='white')\n \n ax.annotate(class_name+\":\\n\"+str(score), xy=(left, top), xytext=(left - 50, top - 50),\n color='white', size=16, ha='right', bbox=dict(facecolor='blue', alpha=0.5),\n arrowprops=dict(arrowstyle='fancy', fc='cyan', ec='none'))\n\n # row = str(im_name)+';'+str(bbox[0])+';'+str(bbox[1])+';'+str(bbox[2] - bbox[0])+';'+str(bbox[3] - bbox[1])+';'+str(class_name)+';'+str(score)\n # f.write()\n row = str(im_name)+FILE_DELIMITER+str(width)+FILE_DELIMITER+str(height)+FILE_DELIMITER+str(left)+FILE_DELIMITER+str(top)+FILE_DELIMITER+str(right - left)+FILE_DELIMITER+str(bottom - top)+FILE_DELIMITER+str(class_name)+FILE_DELIMITER+str(score)\n # f.write()\n all_rows.append(row)\n # print(\"Detection Row: \")\n # print(row)\n \n # ax.set_title(('{} detections with ''p({} | box) >= {:.1f}').format(class_name, class_name, CONF_THRESH), fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n \n # name = os.path.basename(im_name)[:-4]\n # pltname = name+\"_\"+class_name+\".jpg\"\n pltname = im_name\n print('pltname:'+pltname)\n\n fileName = os.path.join(os.path.dirname( out_file ), pltname)\n print('fileName to be saved: '+fileName)\n ax.set(xlim=[0, width], ylim=[height, 0], aspect=1)\n\n try:\n # plt.savefig(fileName)\n plt.savefig(fileName, dpi=dpi, transparent=True)\n except Exception as e:\n print(\"Error:\")\n print(e)\n finally:\n plt.close()\n print(\"DONE - Image Saved\")\n \n return all_rows\n\n\n## TBD: training mode\n## comments mentioned as per mask_rcnn and hence to be updated\n## @API function\ndef train(model, Dataset):\n print(\"Inside {}: train()\".format(__file__))\n ### 0. Load Pre-trained model\n ## If required, exclude the last layers because they require a matching number of classes\n\n ### 1. Initialize & Prepare Dataset\n\n ## Training set\n \n ## Validation set\n \n ## Testing set\n\n ## Image Augmentation\n\n ### 2. Create Training Schedules\n\n ## *** This training schedule is an example. 
Update to your needs ***\n \n ## Training - Stage 1\n print(\"Training network heads\")\n \n ## Training - Stage 2\n ## Finetune layers from backbone network (ResNet) stage 4 and up\n print(\"Fine tune Resnet stage 4 and up\")\n\n ## Training - Stage 3\n ## Fine tune all layers\n print(\"Fine tune all layers\")\n\n return\n", "sub_path": "apps/pixel/faster_rcnn_end2end.py", "file_name": "faster_rcnn_end2end.py", "file_ext": "py", "file_size_in_byte": 11370, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "fast_rcnn.config.cfg.TEST", "line_number": 28, "usage_type": "attribute"}, {"api_name": "fast_rcnn.config.cfg", "line_number": 28, "usage_type": "name"}, {"api_name": "caffe.set_mode_cpu", "line_number": 31, "usage_type": "call"}, {"api_name": "caffe.set_mode_gpu", "line_number": 33, "usage_type": "call"}, {"api_name": "caffe.set_device", "line_number": 34, "usage_type": "call"}, {"api_name": "fast_rcnn.config.cfg.GPU_ID", "line_number": 35, "usage_type": "attribute"}, {"api_name": "fast_rcnn.config.cfg", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "name"}, {"api_name": "caffe.Net", "line_number": 53, "usage_type": "call"}, {"api_name": "caffe.TEST", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 61, "usage_type": "attribute"}, {"api_name": "fast_rcnn.test.im_detect", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.timer.Timer", "line_number": 79, "usage_type": "call"}, {"api_name": "fast_rcnn.test.im_detect", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 116, "usage_type": "attribute"}, {"api_name": "fast_rcnn.nms_wrapper.nms", "line_number": 117, "usage_type": "call"}, {"api_name": "pixel.Util.getVizImageFileName", "line_number": 128, "usage_type": "call"}, {"api_name": "pixel.Util", "line_number": 128, "usage_type": "name"}, {"api_name": "pixel.Util.createResponseForVisionAPI", "line_number": 130, "usage_type": "call"}, {"api_name": "pixel.Util", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 140, "usage_type": "call"}, {"api_name": "pixel.Util.getOutFileName", "line_number": 143, "usage_type": "call"}, {"api_name": "pixel.Util", "line_number": 143, "usage_type": "name"}, {"api_name": "pixel.Util.getOutFileRow", "line_number": 148, "usage_type": "call"}, {"api_name": "pixel.Util", "line_number": 148, "usage_type": "name"}, {"api_name": "pixel.Util.getOutFileRow", "line_number": 162, "usage_type": "call"}, {"api_name": "pixel.Util", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 217, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 217, "usage_type": 
"call"}, {"api_name": "os.path.path.join", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 273, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 273, "usage_type": "call"}]} +{"seq_id": "425090149", "text": "from __future__ import annotations\nfrom typing import List, Union, TYPE_CHECKING, Dict, Optional, Set\n\nfrom assemblies.assembly_fun import Assembly\nfrom brain.components import Area, Stimulus, BrainPart\nfrom utils.blueprints.recording import Recording\nif TYPE_CHECKING:\n from brain import Brain\n\n\nclass BrainRecipe:\n def __init__(self, *parts: Union[BrainPart, Assembly]):\n self.areas: Set[Area] = set()\n self.stimuli: Set[Stimulus] = set()\n self.assemblies: Set[Assembly] = set()\n self.extend(*parts)\n self.initialization: Recording = Recording()\n self.ctx_stack: List[Dict[Assembly, Recording]] = []\n\n def _add_area(self, area: Area):\n self.areas.add(area)\n\n def _add_stimulus(self, stimulus: Stimulus):\n self.stimuli.add(stimulus)\n\n def _add_assembly(self, assembly: Assembly):\n self.assemblies.add(assembly)\n if self not in assembly.appears_in:\n assembly.appears_in.add(self)\n\n def append(self, part: Union[Assembly, BrainPart]):\n if isinstance(part, Area):\n self._add_area(part)\n elif isinstance(part, Stimulus):\n self._add_stimulus(part)\n elif isinstance(part, Assembly):\n self._add_assembly(part)\n\n def extend(self, *parts: Union[Assembly, BrainPart]):\n for part in parts:\n self.append(part)\n\n def initialize(self, brain: Brain):\n self.initialization.play(brain=brain)\n\n def __enter__(self):\n current_ctx_stack: Dict[Assembly, Optional[Recording]] = {}\n\n for assembly in self.assemblies:\n if 'recording' in assembly.bound_params:\n current_ctx_stack[assembly] = assembly.bound_params['recording']\n assembly.bind(recording=self.initialization)\n\n self.ctx_stack.append(current_ctx_stack)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n current_ctx_stack: Dict[Assembly, Optional[Recording]] = self.ctx_stack.pop()\n\n for assembly in self.assemblies:\n assembly.unbind('recording')\n if assembly in current_ctx_stack:\n assembly.bind(recording=current_ctx_stack[assembly])\n", "sub_path": "brain/brain_recipe.py", "file_name": "brain_recipe.py", "file_ext": "py", "file_size_in_byte": 2193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 12, "usage_type": "name"}, {"api_name": "brain.components.BrainPart", "line_number": 12, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 13, "usage_type": "name"}, {"api_name": "brain.components.Area", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 14, "usage_type": "name"}, {"api_name": "brain.components.Stimulus", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 15, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.blueprints.recording.Recording", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 18, "usage_type": "name"}, 
{"api_name": "assemblies.assembly_fun.Assembly", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.blueprints.recording.Recording", "line_number": 18, "usage_type": "name"}, {"api_name": "brain.components.Area", "line_number": 20, "usage_type": "name"}, {"api_name": "brain.components.Stimulus", "line_number": 23, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 31, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 31, "usage_type": "name"}, {"api_name": "brain.components.BrainPart", "line_number": 31, "usage_type": "name"}, {"api_name": "brain.components.Area", "line_number": 32, "usage_type": "argument"}, {"api_name": "brain.components.Stimulus", "line_number": 34, "usage_type": "argument"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 36, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 39, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 39, "usage_type": "name"}, {"api_name": "brain.components.BrainPart", "line_number": 39, "usage_type": "name"}, {"api_name": "brain.Brain", "line_number": 43, "usage_type": "name"}, {"api_name": "brain.components", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 47, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "utils.blueprints.recording.Recording", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 58, "usage_type": "name"}, {"api_name": "assemblies.assembly_fun.Assembly", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 58, "usage_type": "name"}, {"api_name": "utils.blueprints.recording.Recording", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "291329147", "text": "import os\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('slaves')\n\nSHOW_SLAVE_STATUS = \"sudo mysql -Bse 'show slave status\\G'\"\n\n\ndef test_master_id(host):\n assert host.file('/etc/mysql/mariadb.conf.d/50-server.cnf').contains(\"server_id = 20\")\n\n\ndef test_slave_io(host):\n for line in str(host.run(SHOW_SLAVE_STATUS).stdout).split('\\n'):\n if line.strip() == 'Slave_IO_Running: Yes':\n return True\n raise AssertionError(\"Slave IO not running!\")\n\n\ndef test_slave_sql(host):\n for line in str(host.run(SHOW_SLAVE_STATUS).stdout).split('\\n'):\n if line.strip() == 'Slave_SQL_Running: Yes':\n return True\n raise AssertionError(\"Slave SQL not running!\")\n\n\ndef test_seconds_behind_master(host):\n for line in str(host.run(SHOW_SLAVE_STATUS).stdout).split('\\n'):\n if line.strip() == 'Seconds_Behind_Master: NULL':\n raise AssertionError(\"Slave not catching up to master!\")\n", "sub_path": "molecule/replication/tests/test_slaves.py", "file_name": "test_slaves.py", "file_ext": "py", "file_size_in_byte": 1040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "testinfra.utils.ansible_runner.utils.ansible_runner.AnsibleRunner", "line_number": 5, "usage_type": "call"}, {"api_name": "testinfra.utils.ansible_runner.utils", "line_number": 5, "usage_type": "attribute"}, {"api_name": 
"testinfra.utils.ansible_runner", "line_number": 5, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}]} +{"seq_id": "597237771", "text": "\"\"\"\nThis is some experiments based on state vectors after 10/22.\nMore comparative than previous last_state_exp.\n\nCopy some functions from last_state_exp.py\n\"\"\"\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport torch\n# import tensorflow as tf\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel,GPT2Model\nimport numpy as np\nfrom scipy.spatial.distance import cosine\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nmath = ['math', 'algebra', 'geometry', 'calculus', 'equations', 'computation', 'numbers', 'addition']\narts = ['poetry', 'art', 'dance', 'literature', 'novel', 'symphony', 'drama', 'sculpture']\nmale_term = ['male', 'man', 'boy', 'brother', 'he', 'him', 'his', 'son']\nfemale_term = ['female', 'woman', 'girl', 'sister', 'she', 'her', 'hers', 'daughter']\n\nps_lst = [142, 314, 374, 479, 703]\nng_lst = [6, 36, 232, 362, 408, 430, 442, 496, 762]\n\ndef get_state(sentence, state_num=12):\n \"\"\"\n input: \n sentence: str, the input sentence; state_num: int(0~12), the number of decoder(12 for top, 0 for embedding)\n output:\n state vector: np.array([]), the state vector for last word\n \"\"\"\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n model = GPT2LMHeadModel.from_pretrained('gpt2',output_hidden_states=True)\n model.eval()\n\n input_ids = torch.tensor(tokenizer.encode(sentence)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=input_ids)\n hidden_states = outputs[3]\n # print(len(outputs))\n return hidden_states[12][0][len(input_ids)].detach().numpy()\n\ndef get_last_state(sentence):\n \"\"\"\n output: last state, use bare gpt2model\n \"\"\"\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n model = GPT2Model.from_pretrained('gpt2')\n model.eval()\n\n input_ids = torch.tensor(tokenizer.encode(sentence)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0].detach().numpy()\n return last_hidden_states[0,len(input_ids)-1,:]\n\ndef get_all_states(stc):\n \"\"\"\n input: sentence(str) to be encoded and used for predicting\n output: state vector matrix for 12 layers, use matrix form to accelerate the distance calculation\n \"\"\"\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n model = GPT2Model.from_pretrained('gpt2',output_hidden_states=True) #the argument is for hidden_states\n model.eval()\n\n input_ids = torch.tensor(tokenizer.encode(stc)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n all_hidden_states = np.asarray(outputs[2])\n\n states_matrix = np.zeros([12,768]) #only store the vector for 12 decoders\n for i in range(1,13): #not 0,13 because the first one for embedding layer is not used\n tem = all_hidden_states[i].detach().numpy()\n states_matrix[i-1,:] = tem[0][-1,:]\n return states_matrix\n\ndef get_important_position(stc1,threshold = 20):\n \"\"\"\n get the important index in state vector in 12 layers\n \"\"\"\n mtx1 = get_all_states(stc1)\n important_idx_lst = []\n for i in range(mtx1.shape[0]):\n tem = [j for j in range(mtx1.shape[1]) if abs(mtx1[i,j])>threshold] # this line should be optimized in numpy\n important_idx_lst.append(tem)\n return important_idx_lst\n\ndef flatten_list(lst):\n \"\"\"\n flatten the list of list like [[a,b],c] into list [a,b,c]\n \"\"\"\n out_lst = []\n for i in lst:\n out_lst = out_lst + i\n 
return out_lst\n\ndef get_idx_importance__dict(entity_lst, gender_lst, thre = 20):\n \"\"\"\n stc = 'the '+entity + 'makes \"+ gender_word + \" feels so\"\n \"\"\"\n stc_lst = ['the '+e + ' makes '+g+' feels so ' for e in entity_lst for g in gender_lst]\n out_dic = {}\n for stc in stc_lst:\n tem = get_important_position(stc, threshold= thre)\n #flatten the list of list into a single list\n tem = flatten_list(tem)\n for i in tem:\n if i not in out_dic:\n out_dic[i] = 1\n else:\n out_dic[i] += 1\n \n tem_dic = {}\n for i in out_dic:\n tem_dic[i] = out_dic[i]/len(stc_lst)\n \n return tem_dic\n\n\ndef get_value_of_imp_idx(entity_lst,gender_lst,idx_lst):\n \"\"\"\n given the important index list(which are all/majority positive/negative), find the values in 12 state vectors of this index\n in the whole sentences. \n The main idea is like the reversion of the way we find the important index.\n \"\"\"\n stc_lst = ['the '+e + ' makes '+g+' feels so ' for e in entity_lst for g in gender_lst]\n\n for stc in stc_lst:\n mtx = get_all_states(stc)\n out_mtx = mtx[:,idx_lst]\n print(out_mtx)\n \n\n\n\nif __name__ == '__main__':\n \"\"\"\n The intuition is that \"math makes man feel so\": good,strong,superior..(also some negative words);\n \"math makes woman feel so\": bad, uncomfortable, powerless, vulnerable, inferior, inadequate(all items are selected from top-10)\n \"\"\"\n\n \"\"\"\n Comparative get_idx_importance_dict\n # a = get_idx_importance__dict(math, male_term)\n # print(a)\n # {64: 11.0, 373: 7.8125, 447: 7.390625, 393: 5.5, 481: 4.0}\n # b = get_idx_importance__dict(math, female_term)\n # print(b)\n # {64: 11.0, 373: 7.875, 447: 7.46875, 393: 5.46875, 481: 3.8125}\n print(get_idx_importance__dict(arts,male_term))\n print(get_idx_importance__dict(arts,female_term))\n # {64: 11.0, 373: 7.84375, 447: 7.40625, 393: 5.296875, 481: 4.0, 36: 2.0, 55: 0.734375, 102: 0.90625, 308: 0.4375, 496: 2.0, 314: 1.484375, 430: 1.0, 680: 0.140625}\n # {64: 11.0, 373: 7.953125, 447: 7.625, 393: 5.078125, 481: 3.828125, 36: 2.0, 55: 0.984375, 102: 1.0625, 308: 0.859375, 314: 1.59375, 496: 2.0, 430: 1.046875, 680: 0.234375}\n # still consistent\n # {447: 7.0, 393: 5.875, 481: 4.0}\n \"\"\"\n\n \"\"\"\n based on the ps_lst, ng_lst which means all values in this index is positive/negative, look at the values of 12 state vectors\n of these indexes\n \"\"\"\n\n get_value_of_imp_idx(['flower'],['man','woman'],ps_lst)", "sub_path": "state_compare_exp.py", "file_name": "state_compare_exp.py", "file_ext": "py", "file_size_in_byte": 5989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 34, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer", "line_number": 34, "usage_type": "name"}, {"api_name": "transformers.GPT2LMHeadModel.from_pretrained", "line_number": 35, "usage_type": "call"}, {"api_name": "transformers.GPT2LMHeadModel", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 38, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 48, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer", "line_number": 48, "usage_type": "name"}, {"api_name": "transformers.GPT2Model.from_pretrained", "line_number": 49, "usage_type": "call"}, {"api_name": "transformers.GPT2Model", "line_number": 49, "usage_type": "name"}, 
{"api_name": "torch.tensor", "line_number": 52, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer.from_pretrained", "line_number": 62, "usage_type": "call"}, {"api_name": "transformers.GPT2Tokenizer", "line_number": 62, "usage_type": "name"}, {"api_name": "transformers.GPT2Model.from_pretrained", "line_number": 63, "usage_type": "call"}, {"api_name": "transformers.GPT2Model", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "443502258", "text": "import re\nimport json\n\nABBREVS = {\n 'MRKT': 'MARKET',\n 'ORDR': 'ORDER',\n 'LWS': 'LAWS',\n 'BRCH': 'BREACH',\n 'DT': 'DUTY'\n}\n\nwith open('data/allegation_types.txt', 'r') as f:\n # convert types into regexes\n # since they are inconsistent in spacing around dashes\n types = [l for l in f.read().split('\\n')]\n types_regs = [re.compile(t.replace('-', '- *')) for t in types]\ndescs = json.load(open('data/allegation_descs.json', 'r'))\n\n\ndef parse_allegations(a):\n labels = []\n for t, r in zip(types, types_regs):\n if r.match(a) is not None:\n labels.append(t)\n if labels:\n return labels\n\n for t, desc in descs.items():\n if any(d in a for d in desc):\n labels.append(t)\n if labels:\n return labels\n\n # on a deadline so here are some handcrafted rules\n if 'VIOLATION' in a or 'VIOLATED' in a or \\\n 'FAILED TO COMPLY' in a or 'FAILURE TO COMPLY' in a:\n labels.append('RULE VIOLATION')\n if 'NON PAYMENT' in a or 'FAILED TO PAY' in a or 'FAILURE TO PAY' in a or ('FEE' in a and 'FAILURE' in a):\n labels.append('FAILURE TO PAY')\n if 'TRANSMIT' in a or 'FAILED TO PROPERLY NOTIFY' in a or 'NOT REPORTED' in a or 'FAILING TO REPORT' in a or 'NOT NOTIFY' in a or 'FAILED TO REPORT' in a or 'FAILURE TO REPORT' in a \\\n or 'FAILED TO FILE' in a or 'FAILURE TO FILE' in a \\\n or 'DISCLOSED' in a or 'DISCLOSURE' in a \\\n or 'OMISSION' in a \\\n or ('SUPPLYING' in a and 'STATEMENT' in a):\n labels.append('FAILURE TO REPORT')\n if 'INACCURATE' in a or 'INCORRECTLY REPORTED' in a or 'REPORTED INCORRECTLY' in a or \\\n 'SHOULD HAVE REPORTED' in a or 'IMPROPER FORM' in a or \\\n 'FAILED TO ACCURATELY REPORT' in a:\n labels.append('INCORRECTLY REPORTED')\n if 'FAILURE TO REGISTER' in a or ('REGISTRATION' in a and 'PENDING' in a) or \\\n ('LICENSE' in a and ('WITHOUT' in a or 'FAILED' in a)) or \\\n 'REGISTRATION' in a or 'LICENSURE' in a or \\\n 'UNREGISTERED' in a or 'REGISTERED' in a:\n labels.append('ACTIVITY WHILE REGISTRATION PENDING')\n if 'DILIGENCE' in a:\n labels.append('FAILURE OF DUE DILIGENCE')\n if 'FAILURE TO RESPOND' in a or 'FAILED TO RESPOND' in a:\n labels.append('FAILURE TO RESPOND TO FINRA')\n if 'INACCURATE' in a and 'DATA' in a:\n labels.append('INACCURATE DATA')\n if 'MARKED' in a:\n labels.append('INCORRECT MARK')\n if 'CHARGED' in a:\n labels.append('IMPROPERLY CHARGED')\n if 'FAILED TO RECORD' in a:\n labels.append('FAILURE TO RECORD')\n if 'MISREPRESENTED' in a:\n labels.append('MISREPRESENTATION')\n if 'UNLICENSED' in a or 'LICENSE' in a:\n labels.append('OPERATING WITHOUT LICENSE OR IMPROPER LICENSE')\n if not labels:\n labels.append('OTHER')\n return labels\n", "sub_path": "parse.py", "file_name": "parse.py", "file_ext": "py", "file_size_in_byte": 2877, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.compile", 
"line_number": 16, "usage_type": "call"}, {"api_name": "json.load", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "571477154", "text": "import pygame\nimport functions as fcs\nimport PygameTextWriter as pTW\nimport PygameKeyTracker as pKT\n\nclass toggleableImg:\n def __init__(self, name, pos, margins, imgPaths, function, value=False):\n # imgPaths must be a list of two paths\n # first image is for off, second for on\n self.name = name\n self.pos = pos\n self.margins = margins\n self.imgs = []\n for i in imgPaths:\n self.imgs.append(pygame.image.load(i))\n self.value = value\n self.func = function\n self.res = (self.imgs[0].get_rect()[2], self.imgs[0].get_rect()[3])\n \n def collide(self, mPos, autoCall=False):\n if (self.pos[0] <= mPos[0] <= self.pos[0]+self.res[0]) and (self.pos[1] <= mPos[1] <= self.pos[1]+self.res[1]):\n if autoCall:\n self.value = fcs.flip(self.value)\n self.func(self.value)\n print(self.name + \"-s value: \" + str(self.value))\n else:\n return True\n return False\n \n def draw(self, dispSurface):\n if self.value:\n dispSurface.blit(self.imgs[1], (self.pos[0]+self.margins[0], self.pos[1]+self.margins[1]))\n else:\n dispSurface.blit(self.imgs[0], (self.pos[0]+self.margins[0], self.pos[1]+self.margins[1]))\n\nclass clickableRect:\n def __init__(self, name, pos, res, mainColor, borderColor, function):\n self.name = name\n self.pos = pos\n self.res = res\n self.mC = mainColor\n self.bC = borderColor\n self.func = function\n \n def collide(self, mPos, autoCall=False):\n if (self.pos[0] <= mPos[0] <= self.pos[0]+self.res[0]) and (self.pos[1] <= mPos[1] <= self.pos[1]+self.res[1]):\n if autoCall:\n self.func(self.name)\n else:\n return True\n return False\n \n def draw(self, dispSurface):\n pygame.draw.rect(dispSurface, self.bC, (self.pos[0], self.pos[1], self.res[0], self.res[1])) # border\n pygame.draw.rect(dispSurface, self.mC, (self.pos[0]+2, self.pos[1]+2, self.res[0]-4, self.res[1]-4)) # inside\n\nclass icon:\n def __init__(self, name, pos, iconPath, margins, function):\n self.name = name\n self.img = pygame.image.load(iconPath)\n self.margins = margins\n self.func = function\n self.pos = pos\n self.res = (self.img.get_rect()[2], self.img.get_rect()[3])\n print(self.res)\n \n def collide(self, mPos, autoCall=False):\n if (self.pos[0] <= mPos[0] <= self.pos[0]+self.res[0]) and (self.pos[1] <= mPos[1] <= self.pos[1]+self.res[1]):\n if autoCall:\n self.func()\n else:\n return True\n return False\n \n def draw(self, dispSurface, border=True, borderColor=(0, 0, 0)):\n dispSurface.blit(self.img, self.pos)\n if border and self.collide(pygame.mouse.get_pos()):\n pygame.draw.rect(dispSurface, borderColor, (self.pos[0]-self.margins[0], self.pos[1]-self.margins[1], self.res[0]+self.margins[0]*2, 1)) # top-left -> right\n pygame.draw.rect(dispSurface, borderColor, (self.pos[0]-self.margins[0], self.pos[1]-self.margins[0], 1, self.res[1]+self.margins[1]*2)) # top-left -> down\n pygame.draw.rect(dispSurface, borderColor, (self.pos[0]-self.margins[0], self.pos[1]+self.res[1]+self.margins[1]-1, self.res[0]+self.margins[0]*2, 1)) # bottom-left -> right\n pygame.draw.rect(dispSurface, borderColor, (self.pos[0]+self.res[0]+self.margins[0], self.pos[1]-self.margins[1], 1, self.res[1]+self.margins[1]*2)) # top-right -> down\n \n def move(self, newPos):\n self.pos = newPos\n\nclass stickySave:\n def __init__(self, pos, res, margins, stickyObj, function):\n self.pos = pos\n self.res = res\n self.margins = margins\n self.stickyObj = stickyObj\n self.title = stickyObj.title\n 
self.func = function\n \n def collide(self, mPos, autoCall=False):\n if (self.pos[0] <= mPos[0] <= self.pos[0]+self.res[0]) and (self.pos[1] <= mPos[1] <= self.pos[1]+self.res[1]):\n if autoCall:\n self.func(self.title)\n else:\n return True\n return False\n \n def draw(self, dispSurface, pTW):\n pygame.draw.rect(dispSurface, self.stickyObj.mainColor, (self.pos[0], self.pos[1], self.res[0], self.res[1]))\n pTW.write(self.title, [self.pos[0]+self.margins[0], self.pos[1]+self.margins[1]])\n if self.collide(pygame.mouse.get_pos()):\n pygame.draw.rect(dispSurface, self.stickyObj.altColor1, (self.pos[0], self.pos[1], self.res[0], 1)) # top-left -> right\n pygame.draw.rect(dispSurface, self.stickyObj.altColor1, (self.pos[0], self.pos[1], 1, self.res[1])) # top-left -> bottom\n pygame.draw.rect(dispSurface, self.stickyObj.altColor1, (self.pos[0], self.pos[1]+self.res[1], self.res[0], 1)) # bottom-left -> right\n pygame.draw.rect(dispSurface, self.stickyObj.altColor1, (self.pos[0]+self.res[0], self.pos[1], 1, self.res[1])) # top-right -> down\n\n#======================#\n#----- INFO CLASS -----#\n#======================#\n\nclass Info:\n def __init__(self, fps):\n self.stickies = self.loadStickies()\n self.dispObj = None\n self.keyTracker = None\n self.textWriter = None\n self.clock = pygame.time.Clock()\n self.fps = fps\n # initializing function calls\n self.reset()\n \n def reset(self):\n self.dispObj = graphicalContainer((500, 350), fcs.rOA(self.stickies).mainColor, fcs.rOA(self.stickies).altColor1)\n fcs.rOA(self.stickies).resetIconPositions()\n self.keyTracker = pKT.KeyTracker(fcs.rOA(self.stickies).add, fcs.rOA(self.stickies).backspace, fcs.rOA(self.stickies).directionHandle, fps=self.fps)\n self.textWriter = pTW.TextWriter(self.dispObj.surface, \"FONT.png\")\n pygame.display.set_caption(fcs.rOA(self.stickies).title)\n\n def loadStickies(self):\n fcsLoadedStickies = fcs.loadStickies()\n stickies = []\n for i in fcsLoadedStickies:\n stickies.append(Sticky(self, i))\n stickies[0].active = True\n self.stickies = stickies\n return stickies\n # reset()\n # keyTracker = pKT.KeyTracker(fcs.rOA(stickies).add, fcs.rOA(stickies).backspace, fcs.rOA(stickies).directionHandle, fps=fps)\n \n def changeActive(self, title):\n fcs.rOA(self.stickies).menu = \"CONTENT\"\n for sticky in self.stickies:\n sticky.active = False\n for sticky in self.stickies:\n if sticky.title == title:\n sticky.active = True\n sticky.menu = \"CONTENT\"\n self.reset()\n\n#========================#\n#----- STICKY CLASS -----#\n#========================#\n\nICON_ORDER = [\"SAVE\", \"OPEN\", \"NEW\", \"SETTINGS\"]\n\nclass Sticky:\n def __init__(self, infoObj, attrDict, cursorPos=None):\n # functions from main file\n self.infoObj = infoObj\n # set info from attrDict\n self.title = attrDict[\"TITLE\"]\n self.theme = attrDict[\"THEME\"]\n self.showNumbers = attrDict[\"SHOW-NUMBERS\"]\n self.editTitle = attrDict[\"EDIT-TITLE\"]\n self.content = attrDict[\"CONTENT\"]\n self.mainColor = attrDict[\"MainColor\"]\n self.altColor1 = attrDict[\"AltColor1\"]\n self.altColor2 = attrDict[\"AltColor2\"]\n self.savePath = attrDict[\"SAVEPATH\"]\n self.filename = attrDict[\"filename\"]\n # other initializing vairables\n self.active = False\n self.menu = \"CONTENT\" # can be \"CONTENT\" , \"SETTINGS\" , or \"SAVE-PICKER\"\n self.baseMargins = [5, 5]\n self.margins = self.baseMargins\n # cursor position setting\n if not cursorPos:\n self.cursorPos = len(self.content)\n else:\n self.cursorPos = cursorPos\n # icons\n self.icons = {\n \"SAVE\": None,\n 
\"OPEN\": None,\n \"NEW\": None,\n \"SETTINGS\": None\n }\n self.settingsIcons = {\n \"THEME-YELLOW\": None,\n \"THEME-BLUE\": None,\n \"SHOW-NUMBERS\": None\n }\n self.savedStickies = []\n self.loadIcons()\n print(self.icons)\n for i in self.icons:\n print(i)\n self.checkMargins()\n \n def __str__(self):\n return (\"Sticky of name: \" + self.title + \", with a theme of: \" + self.theme + \", with a content of length: \" + str(len(self.content)) + \", Show Numbers: \" + str(self.showNumbers))\n\n def checkMargins(self):\n if self.showNumbers:\n currLines = fcs.getTotalLines(self.content)+1 # +1 to account for the \"starting at 0\"\n digits = len(str(currLines))+1 # +1 for the extra sepparating line\n newMargins = [ self.baseMargins[0] + ( 7 * digits ), self.baseMargins[1] ]\n self.margins = newMargins\n else:\n self.margins = self.baseMargins\n\n def loadIcons(self):\n self.icons = {\n \"SAVE\": icon(\"SAVE\", (0, 0), (\"ASSETS/SAVE-\"+self.theme+\".png\"), (4, 4), self.iconSave),\n \"NEW\": icon(\"NEW\", (0, 0), (\"ASSETS/NEW-\"+self.theme+\".png\"), (4, 4), self.iconNew),\n \"OPEN\": icon(\"OPEN\", (0, 0), (\"ASSETS/OPEN-\"+self.theme+\".png\"), (4, 4), self.iconOpen),\n \"SETTINGS\": icon(\"SETTINGS\", (0, 0), (\"ASSETS/SETTINGS-\"+self.theme+\".png\"), (4, 4), self.iconSettings)\n }\n self.settingsIcons = {\n \"THEME-YELLOW\": clickableRect(\"YELLOW\", (53, 5), (24, 24), (255, 218, 25), (25, 25, 25), self.iconChangeTheme),\n \"THEME-ORANGE\": clickableRect(\"ORANGE\", (89, 5), (24, 24), (255, 114, 0), (25, 25, 25), self.iconChangeTheme),\n \"THEME-PINK\": clickableRect(\"PINK\", (125, 5), (24, 24), (251, 93, 93), (25, 25, 25), self.iconChangeTheme),\n \"THEME-PURPLE\": clickableRect(\"PURPLE\", (161, 5), (24, 24), (151, 53, 255), (25, 25, 25), self.iconChangeTheme),\n \"THEME-BLUE\": clickableRect(\"BLUE\", (197, 5), (24, 24), (0, 164, 255), (25, 25, 25), self.iconChangeTheme),\n \"THEME-GREEN\": clickableRect(\"GREEN\", (233, 5), (24, 24), (0, 163, 37), (25, 25, 25), self.iconChangeTheme),\n \"SHOW-NUMBERS\": toggleableImg(\"SHOW-NUMBERS-TOGGLE\", (98, 34), (4, 4), [\"ASSETS/UNCHECK.png\", \"ASSETS/CHECK.png\"], self.iconShowNumbers, value=self.showNumbers),\n \"EDIT-TITLE\": toggleableImg(\"EDIT-TITLE-TOGGLE\", (105, 67), (4, 4), [\"ASSETS/UNCHECK.png\", \"ASSETS/CHECK.png\"], self.iconEditTitle, value=self.editTitle)\n }\n # template: clickableRect(\"COLOUR\", (x, 5), (24, 24), (\"AltColor1\"), (25, 25, 25), self.iconChangeTheme)\n\n def iconSave(self):\n sn = \"TRUE\" if self.showNumbers else \"FALSE\"\n et = \"TRUE\" if self.editTitle else \"FALSE\"\n # write\n toWrite = (\"/TITLE \" + self.title + \"\\n/THEME \" + self.theme + \"\\n/SHOW-NUMBERS \" + sn + \"\\n/EDIT-TITLE \" + et + \"\\n/START-CONTENT\\n\" + self.content + \"\\n/END-CONTENT\\n\")\n f = open(self.savePath, \"wt\")\n f.write(toWrite)\n f.close()\n print(\"SAVED\")\n \n def iconOpen(self):\n # print(\"classes.Sticky.iconOpen called\")\n if self.menu == \"OPEN\":\n self.menu = \"CONTENT\"\n else:\n self.menu = \"OPEN\"\n self.savedStickies = []\n x, y = 0, 0\n for i in self.infoObj.stickies:\n self.savedStickies.append(stickySave((self.baseMargins[0]+x, self.baseMargins[1]+y), (self.infoObj.dispObj.res[0]-10, 15), self.baseMargins, i, self.infoObj.changeActive))\n y += 15 + self.baseMargins[1]\n\n def iconNew(self):\n print(\"classes.Sticky.iconNew called\")\n fcs.makeNewSticky()\n self.infoObj.loadStickies()\n self.infoObj.changeActive(\"New Sticky\")\n\n def iconSettings(self):\n # 
print(\"classes.Sticky.iconSettings called\")\n if self.menu == \"SETTINGS\":\n self.menu = \"CONTENT\"\n else:\n self.menu = \"SETTINGS\"\n \n def iconChangeTheme(self, color):\n if color in [\"YELLOW\", \"ORANGE\", \"PINK\", \"PURPLE\", \"BLUE\", \"GREEN\"]:\n self.theme = color\n self.iconSave()\n self.loadIcons()\n self.infoObj.loadStickies()\n self.infoObj.reset()\n print(\"new color: \" + color)\n \n def iconShowNumbers(self, bool):\n self.showNumbers = bool\n self.checkMargins()\n \n def getAndSetTitle(self):\n contentSplit = self.content[:fcs.lenOfStringUntil(self.content, 0)].split(\" \")\n # print(contentSplit)\n title = \"\"\n for word in contentSplit[1:]:\n title += word\n if word != contentSplit[-1]:\n title += \" \"\n self.title = title\n\n def iconEditTitle(self, bool):\n self.editTitle = bool\n print(\"New Edit Title value: \" + str(self.editTitle))\n if self.editTitle and not self.content.startswith(\"TITLE: \"):\n self.content = (\"TITLE: \" + self.title + \"\\n\" + self.content)\n elif not self.editTitle and self.content.startswith(\"TITLE: \"):\n # get and set title\n self.getAndSetTitle()\n # change content back\n self.content = self.content[fcs.lenOfStringUntil(self.content, 0)+1:]\n # save, change title\n self.iconSave()\n # rename file\n fcs.renameSticky(self.filename, self.title, self.infoObj)\n # reload\n self.infoObj.loadStickies()\n self.infoObj.changeActive(self.title)\n \n def drawToolbarIcons(self):\n for icon in self.icons:\n self.icons[icon].draw(self.infoObj.dispObj.surface)\n \n def drawSavePicker(self):\n for save in self.savedStickies:\n save.draw(self.infoObj.dispObj.surface, self.infoObj.textWriter)\n\n def drawSettings(self):\n # theme (item #1)\n self.infoObj.textWriter.write(\"Theme: \", (5, 14))\n # show numbers (item #2)\n self.infoObj.textWriter.write(\"Show Numbers: \", (5, 47))\n # edit title (item #3)\n self.infoObj.textWriter.write(\"Editing Title: \", (5, 80))\n # icons\n for icon in self.settingsIcons:\n self.settingsIcons[icon].draw(dispSurface=self.infoObj.dispObj.surface)\n \n def drawNumbers(self, drawSeparator=True):\n if self.showNumbers:\n lineStr = \"\"\n for i in range(fcs.getTotalLines(self.content)+1):\n lineStr += str(i+1) + \"\\n\"\n self.infoObj.textWriter.write(lineStr, self.baseMargins)\n if drawSeparator:\n pygame.draw.rect(self.infoObj.dispObj.surface, self.altColor1, (self.margins[0]-6, 0, 2, self.infoObj.dispObj.res[1]))\n \n def resetIconPositions(self):\n x = 4\n y = self.infoObj.dispObj.res[1]-28\n for icon in ICON_ORDER:\n if icon == \"LOCK\":\n pass\n else:\n self.icons[icon].move((x, y))\n x += 32\n \n def mouseDown(self):\n # toolbar icons\n for icon in self.icons:\n self.icons[icon].collide(pygame.mouse.get_pos(), autoCall=True)\n # settings icons if the settings menu is open\n if self.menu == \"SETTINGS\":\n for icon in self.settingsIcons:\n self.settingsIcons[icon].collide(pygame.mouse.get_pos(), autoCall=True)\n # save picker saves if the save picker menu is open\n elif self.menu == \"OPEN\":\n for save in self.savedStickies:\n save.collide(pygame.mouse.get_pos(), autoCall=True)\n \n def add(self, string):\n self.content = fcs.addCharAtPos(self.content, self.cursorPos, string)\n self.cursorPos += 1\n self.checkMargins()\n \n def backspace(self, amount=1):\n for _ in range(amount):\n if len(self.content) > 0 and self.cursorPos > 0:\n self.content = fcs.removeCharAtPos(self.content, self.cursorPos-1)\n self.cursorPos -= 1\n self.checkMargins()\n \n def directionHandle(self, direction):\n if direction == 
\"LEFT\":\n if self.cursorPos > 0:\n self.cursorPos -= 1\n if direction == \"RIGHT\":\n if self.cursorPos < len(self.content):\n self.cursorPos += 1\n if direction == \"UP\":\n # get current line\n currLine = fcs.getLineInString(self.content, self.cursorPos)\n if currLine == 0:\n self.cursorPos = 0\n else:\n # lengths\n lenOfLineUp = fcs.lenOfStringAtLine(self.content, currLine-1)\n lenOfLineNow = fcs.lenOfStringAtLine(self.content, currLine)\n # offsets\n negOffSetNow = ( fcs.lenOfStringUntil(self.content, currLine) - self.cursorPos )\n negOffSetAbove = ( negOffSetNow + (lenOfLineUp - lenOfLineNow) )\n posOffSetNow = ( fcs.lenOfStringAtLine(self.content, currLine) - (fcs.lenOfStringToIncluding(self.content, currLine) - self.cursorPos) )\n # if longer than where the cursor is\n if lenOfLineUp > posOffSetNow:\n self.cursorPos = ( fcs.lenOfStringUntil(self.content, currLine-1, offSetNegative=negOffSetAbove))\n # if shorter than where the cursor is\n else:\n self.cursorPos = ( fcs.lenOfStringToIncluding(self.content, currLine-1) )\n if direction == \"DOWN\":\n # get current line\n currLine = fcs.getLineInString(self.content, self.cursorPos)\n if currLine == fcs.getLineInString(self.content, len(self.content)):\n self.cursorPos = len(self.content)\n else:\n # lengths\n lenOfLineBelow = fcs.lenOfStringAtLine(self.content, currLine+1)\n lenOfLineNow = fcs.lenOfStringAtLine(self.content, currLine)\n # offsets\n negOffSetNow = ( fcs.lenOfStringUntil(self.content, currLine) - self.cursorPos )\n negOffSetBelow = ( negOffSetNow + (lenOfLineBelow - lenOfLineNow) )\n posOffSetNow = ( fcs.lenOfStringAtLine(self.content, currLine) - (fcs.lenOfStringToIncluding(self.content, currLine) - self.cursorPos) )\n # if longer than where the cursor is\n if lenOfLineBelow > posOffSetNow:\n self.cursorPos = ( fcs.lenOfStringUntil(self.content, currLine+1, offSetNegative=negOffSetBelow) )\n # if shorter than where the cursor is\n else:\n self.cursorPos = ( fcs.lenOfStringToIncluding(self.content, currLine+1) )\n\nclass graphicalContainer:\n def __init__(self, res, mainColor, altColor, toolbarHeight=32):\n self.res = res\n self.mainColor = mainColor\n self.altColor = altColor\n self.surface = pygame.display.set_mode(res, pygame.RESIZABLE)\n self.tbH = toolbarHeight # tbH stands for toolbarHeight\n \n def drawBackground(self):\n pygame.draw.rect(self.surface, self.mainColor, (0, 0, self.res[0], self.res[1]))\n \n def drawToolbar(self):\n pygame.draw.rect(self.surface, self.altColor, (0, self.res[1]-self.tbH, self.res[0], self.tbH))\n\n def resize(self, newRes):\n self.surface = pygame.display.set_mode(newRes, pygame.RESIZABLE)\n self.res = newRes\n", "sub_path": "Old Versions/Stickies 2.1/classes.py", "file_name": "classes.py", "file_ext": "py", "file_size_in_byte": 17482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "functions.flip", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 60, 
"usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 104, "usage_type": "attribute"}, {"api_name": "PygameTextWriter.write", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.mouse.get_pos", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 122, "usage_type": "attribute"}, {"api_name": "functions.rOA", "line_number": 128, "usage_type": "call"}, {"api_name": "functions.rOA", "line_number": 129, "usage_type": "call"}, {"api_name": "PygameKeyTracker.KeyTracker", "line_number": 130, "usage_type": "call"}, {"api_name": "functions.rOA", "line_number": 130, "usage_type": "call"}, {"api_name": "PygameTextWriter.TextWriter", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 132, "usage_type": "attribute"}, {"api_name": "functions.rOA", "line_number": 132, "usage_type": "call"}, {"api_name": "functions.loadStickies", "line_number": 135, "usage_type": "call"}, {"api_name": "functions.rOA", "line_number": 146, "usage_type": "call"}, {"api_name": "functions.getTotalLines", "line_number": 210, "usage_type": "call"}, {"api_name": "functions.makeNewSticky", "line_number": 260, "usage_type": "call"}, {"api_name": "functions.lenOfStringUntil", "line_number": 285, "usage_type": "call"}, {"api_name": "functions.lenOfStringUntil", "line_number": 303, "usage_type": "call"}, {"api_name": "functions.renameSticky", "line_number": 307, "usage_type": "call"}, {"api_name": "functions.getTotalLines", "line_number": 334, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 338, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 338, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 353, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 353, "usage_type": "attribute"}, {"api_name": 
"pygame.mouse.get_pos", "line_number": 357, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 357, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 361, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 361, "usage_type": "attribute"}, {"api_name": "functions.addCharAtPos", "line_number": 364, "usage_type": "call"}, {"api_name": "functions.removeCharAtPos", "line_number": 371, "usage_type": "call"}, {"api_name": "functions.getLineInString", "line_number": 384, "usage_type": "call"}, {"api_name": "functions.lenOfStringAtLine", "line_number": 389, "usage_type": "call"}, {"api_name": "functions.lenOfStringAtLine", "line_number": 390, "usage_type": "call"}, {"api_name": "functions.lenOfStringUntil", "line_number": 392, "usage_type": "call"}, {"api_name": "functions.lenOfStringAtLine", "line_number": 394, "usage_type": "call"}, {"api_name": "functions.lenOfStringToIncluding", "line_number": 394, "usage_type": "call"}, {"api_name": "functions.lenOfStringUntil", "line_number": 397, "usage_type": "call"}, {"api_name": "functions.lenOfStringToIncluding", "line_number": 400, "usage_type": "call"}, {"api_name": "functions.getLineInString", "line_number": 403, "usage_type": "call"}, {"api_name": "functions.getLineInString", "line_number": 404, "usage_type": "call"}, {"api_name": "functions.lenOfStringAtLine", "line_number": 408, "usage_type": "call"}, {"api_name": "functions.lenOfStringAtLine", "line_number": 409, "usage_type": "call"}, {"api_name": "functions.lenOfStringUntil", "line_number": 411, "usage_type": "call"}, {"api_name": "functions.lenOfStringAtLine", "line_number": 413, "usage_type": "call"}, {"api_name": "functions.lenOfStringToIncluding", "line_number": 413, "usage_type": "call"}, {"api_name": "functions.lenOfStringUntil", "line_number": 416, "usage_type": "call"}, {"api_name": "functions.lenOfStringToIncluding", "line_number": 419, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 426, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 426, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 426, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 430, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 430, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 433, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 433, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 436, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 436, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 436, "usage_type": "attribute"}]} +{"seq_id": "498923723", "text": "import oss2\r\nimport os,time,sys\r\n\r\n####\r\nacc_id = sys.argv[1]\r\nacc_sec = sys.argv[2]\r\noss_storage_path = sys.argv[3]\r\n##\r\nbucket_name = 'novo-data-nj'\r\n##\r\nENDPOINT = 'http://oss-cn-hangzhou.aliyuncs.com'\r\n\r\ncur_date = time.strftime('%Y_%m',time.localtime(time.time()))\r\nworkdir = sys.argv[4]\r\n\r\nauth = oss2.Auth(acc_id, acc_sec)\r\nbucket = oss2.Bucket(auth, ENDPOINT, bucket_name)\r\n\r\nossutil = '/home/src/gopath/bin/ossutil'\r\n\r\n##get\\create workdir##\r\ndef get_work_dir():\r\n\tif not os.path.exists(workdir):\r\n\t\t#print('workdir doesn\\'t exist,creating')\r\n\t\ttry:\r\n\t\t\tos.makedirs(workdir)\r\n\t\t\t#print('success')\r\n\t\texcept Exception as e:\r\n\t\t\t#print('failed:')\r\n\t\t\traise e\r\n\tbatches = 
os.listdir(workdir)\r\n\tnums = [0]\r\n\t#print(type(nums),nums)\r\n\tfor x in batches:\r\n\t\tnums.append(int(x.split('batch')[-1])) \r\n\tif not os.path.isfile(workdir+cur_date.split('_')[-1]+'_'+'batch'+str(max(nums))+'/.finished'):\r\n\t\treturn workdir+cur_date.split('_')[-1]+'_'+'batch'+str(max(nums))+'/'\r\n\telse:\r\n\t\treturn workdir+cur_date.split('_')[-1]+'_'+'batch'+str(max(nums)+1)+'/'\r\n\r\n##get .gz s##\r\ndef get_file_list(download_dir):\r\n\tos.system('{} config -i {} -k {} -e {}'.format(ossutil,acc_id,acc_sec,ENDPOINT))\r\n\tfile_detail_list = os.popen('{} ls {}'.format(ossutil,oss_storage_path))\r\n\tf = file_detail_list.read()\r\n\tprint(f)\r\n\tfile_list = []\r\n\tfor x in f.split('\\n'):\r\n\t\tif oss_storage_path in x:\r\n\t\t\tfile = x.split(' ')[-1]\r\n\t\t\tif(file.endswith('.gz')):\r\n\t\t\t\t\r\n\t\t\t\tif not os.path.exists(download_dir+file.split('/')[-1].split('_')[0]+'/'):\r\n\t\t\t\t\tos.makedirs(download_dir+file.split('/')[-1].split('_')[0]+'/')\r\n\t\t\t\tfile_list.append(file)\r\n\tprint(file_list)\r\n\treturn file_list\r\n\r\ndef check_sum():\r\n\tpass\r\n\r\ndef main():\r\n\tdownload_dir = get_work_dir()\r\n\t#print('will download to :',download_dir)\r\n\tfor file in get_file_list(download_dir):\r\n\t\tpass\r\n\t\t#print(file)\r\n\t\t#bucket.get_object_to_file(file.split(bucket_name+'/')[-1], download_dir+file.split('/')[-1].split('_')[0]+'/'+file.split('/')[-1])\r\n\t#os.mknod(download_dir+'.finished')\r\n\t#print('download finished')\r\n\r\nif __name__ == '__main__':\r\n\tmain()", "sub_path": "download.py", "file_name": "download.py", "file_ext": "py", "file_size_in_byte": 2067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 13, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "oss2.Auth", "line_number": 16, "usage_type": "call"}, {"api_name": "oss2.Bucket", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 43, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 54, "usage_type": "call"}]}
+{"seq_id": "384802631", "text": "from CGH import CGH\nimport pycuda.gpuarray as gpuarray\nimport pycuda.cumath as cumath\nimport pycuda.autoinit\nfrom pycuda.compiler import SourceModule\nimport numpy as np\n\n\nclass cudaCGH(CGH):\n\n    def __init__(self, slm=None):\n        super(cudaCGH, self).__init__(slm=slm)\n        self.init_cuda()\n\n    def init_cuda(self):\n        mod = SourceModule(\"\"\"\n        #include 
<pycuda-complex.hpp>\n\n __global__ void outer(pycuda::complex<float> *x, \\\n pycuda::complex<float> *y, \\\n pycuda::complex<float> *out, \\\n int nx, int ny)\n {\n int i = threadIdx.x + blockDim.x * blockIdx.x;\n int j = threadIdx.y + blockDim.y * blockIdx.y;\n if (i < nx && j < ny){\n out[i*ny + j] = x[i]*y[j];\n }\n }\n\n __global__ void phase(pycuda::complex<float> *psi, \\\n unsigned char *out, \\\n int nx, int ny)\n {\n int i = threadIdx.x + blockDim.x * blockIdx.x;\n int j = threadIdx.y + blockDim.y * blockIdx.y;\n\n int n;\n float im, re, phi;\n\n if (i < nx && j < ny){\n n = i*ny + j;\n im = psi[n]._M_im;\n re = psi[n]._M_re;\n phi = (128./3.14159265359) * atan2f(im, re) + 127.;\n out[n] = (unsigned char) phi;\n }\n }\n \"\"\")\n self.outer = mod.get_function(\"outer\")\n self.phase = mod.get_function(\"phase\")\n self.npts = np.int32(self.w * self.h)\n self.block = (16, 16, 1)\n dx, mx = divmod(self.w, self.block[0])\n dy, my = divmod(self.h, self.block[1])\n self.grid = ((dx + (mx > 0)) * self.block[0],\n (dy + (my > 0)) * self.block[1])\n\n def quantize(self):\n self.phase(self._psi, self._phi,\n np.int32(self.w), np.int32(self.h),\n block=self.block, grid=self.grid)\n self._phi.get(self.phi)\n return self.phi.T\n \n def compute_one(self, amp, r):\n cumath.exp(self.iqx * r.x() + self.iqxsq * r.z(), out=self._ex)\n cumath.exp(self.iqy * r.y() + self.iqysq * r.z(), out=self._ey)\n self._ex *= amp\n self.outer(self._ex, self._ey, self._buffer,\n np.int32(self.w), np.int32(self.h),\n block=self.block, grid=self.grid)\n return self._buffer\n\n def updateGeometry(self):\n shape = (self.w, self.h)\n self._buffer = gpuarray.zeros(shape, dtype=np.complex64)\n self._psi = gpuarray.zeros(shape, dtype=np.complex64)\n self._phi = gpuarray.zeros(shape, dtype=np.uint8)\n self.phi = np.zeros(shape, dtype=np.uint8)\n self._ex = gpuarray.zeros(self.w, dtype=np.complex64)\n self._ey = gpuarray.zeros(self.h, dtype=np.complex64)\n qx = gpuarray.arange(self.w, dtype=np.float32).astype(np.complex64)\n qy = gpuarray.arange(self.h, dtype=np.float32).astype(np.complex64)\n qx = self.qpp * (qx - self.rs.x())\n qy = self.qpp * (qy - self.rs.y())\n self.iqx = 1j * qx\n self.iqy = 1j * qy\n self.iqxsq = 1j * qx * qx\n self.iqysq = 1j * qy * qy\n\n \nif __name__ == '__main__':\n from PyQt4.QtGui import QApplication\n import sys\n from QSLM import QSLM\n\n app = QApplication(sys.argv)\n slm = QSLM()\n cgh = cudaCGH(slm)\n sys.exit(app.exec_())\n", "sub_path": "cudaCGH.py", "file_name": "cudaCGH.py", "file_ext": "py", "file_size_in_byte": 3438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "CGH.CGH", "line_number": 9, "usage_type": "name"}, {"api_name": "pycuda.compiler.SourceModule", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 61, "usage_type": "call"}, {"api_name": "pycuda.cumath.exp", "line_number": 67, "usage_type": "call"}, {"api_name": "pycuda.cumath", "line_number": 67, "usage_type": "name"}, {"api_name": "pycuda.cumath.exp", "line_number": 68, "usage_type": "call"}, {"api_name": "pycuda.cumath", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.int32", "line_number": 71, "usage_type": "call"}, {"api_name": "pycuda.gpuarray.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.complex64", "line_number": 77, 
"usage_type": "attribute"}, {"api_name": "pycuda.gpuarray.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.complex64", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pycuda.gpuarray.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pycuda.gpuarray.zeros", "line_number": 81, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.complex64", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pycuda.gpuarray.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.complex64", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pycuda.gpuarray.arange", "line_number": 83, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pycuda.gpuarray.arange", "line_number": 84, "usage_type": "call"}, {"api_name": "pycuda.gpuarray", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 98, "usage_type": "attribute"}, {"api_name": "QSLM.QSLM", "line_number": 99, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "531252592", "text": "import logging\nfrom functools import lru_cache\n\nimport numpy as np\nfrom scipy.interpolate import splev, splrep\nfrom scipy import integrate\nfrom barry.models.bao_power import PowerSpectrumFit\nfrom barry.cosmology.camb_generator import Omega_m_z\n\n\nclass PowerNoda2019(PowerSpectrumFit):\n \"\"\" P(k) model inspired from Noda 2019.\n\n See https://ui.adsabs.harvard.edu/abs/2019arXiv190106854N for details.\n\n \"\"\"\n\n def __init__(\n self,\n name=\"Pk Noda 2019\",\n fix_params=(\"om\", \"f\", \"gamma\"),\n gammaval=None,\n smooth_type=\"hinton2017\",\n nonlinear_type=\"spt\",\n recon=False,\n postprocess=None,\n smooth=False,\n correction=None,\n ):\n self.recon = recon\n self.recon_smoothing_scale = None\n if gammaval is None:\n if self.recon:\n gammaval = 4.0\n else:\n gammaval = 1.0\n\n super().__init__(name=name, fix_params=fix_params, smooth_type=smooth_type, postprocess=postprocess, smooth=smooth, correction=correction)\n self.set_default(\"gamma\", gammaval)\n\n self.nmu = 100\n self.mu = np.linspace(0.0, 1.0, self.nmu)\n self.smoothing_kernel = None\n\n self.nonlinear_type = nonlinear_type.lower()\n if not self.validate_nonlinear_method():\n exit(0)\n\n def validate_nonlinear_method(self):\n types = [\"spt\", \"halofit\"]\n if self.nonlinear_type in types:\n return True\n else:\n logging.getLogger(\"barry\").error(f\"Smoothing method is {self.nonlinear_type} and not in list {types}\")\n return False\n\n @lru_cache(maxsize=32)\n def get_pt_data(self, om):\n return 
self.PT.get_data(om=om)\n\n @lru_cache(maxsize=32)\n def get_damping(self, growth, om, gamma):\n return np.exp(\n -np.outer(\n (1.0 + (2.0 + growth) * growth * self.mu ** 2) * self.get_pt_data(om)[\"sigma_dd_rs\"]\n + (growth * self.mu ** 2 * (self.mu ** 2 - 1.0)) * self.get_pt_data(om)[\"sigma_ss_rs\"],\n self.camb.ks ** 2,\n )\n / gamma\n )\n\n @lru_cache(maxsize=32)\n def get_nonlinear(self, growth, om):\n return (\n self.get_pt_data(om)[\"Pdd_\" + self.nonlinear_type],\n np.outer(2.0 * growth * self.mu ** 2, self.get_pt_data(om)[\"Pdt_\" + self.nonlinear_type]),\n np.outer((growth * self.mu ** 2) ** 2, self.get_pt_data(om)[\"Ptt_\" + self.nonlinear_type]),\n )\n\n def set_data(self, data):\n super().set_data(data)\n # Compute the smoothing kernel (assumes a Gaussian smoothing kernel)\n if self.recon:\n self.smoothing_kernel = np.exp(-self.camb.ks ** 2 * self.recon_smoothing_scale ** 2 / 2.0)\n\n def declare_parameters(self):\n super().declare_parameters()\n self.add_param(\"f\", r\"$f$\", 0.01, 1.0, 0.5) # Growth rate of structure\n self.add_param(\"gamma\", r\"$\\gamma_{rec}$\", 1.0, 8.0, 1.0) # Describes the sharpening of the BAO post-reconstruction\n self.add_param(\"A\", r\"$A$\", -10, 30.0, 10) # Fingers-of-god damping\n\n def compute_power_spectrum(self, k, p, smooth=False):\n \"\"\" Computes the power spectrum model at k/alpha using the Ding et. al., 2018 EFT0 model\n \n Parameters\n ----------\n k : np.ndarray\n Array of wavenumbers to compute\n p : dict\n dictionary of parameter names to their values\n \n Returns\n -------\n array\n pk_final - The power spectrum at the dilated k-values\n \n \"\"\"\n\n # Get the basic power spectrum components\n ks = self.camb.ks\n pk_smooth_lin, pk_ratio = self.compute_basic_power_spectrum(p[\"om\"])\n\n # Compute the growth rate depending on what we have left as free parameters\n growth = p[\"f\"]\n gamma = p[\"gamma\"]\n\n # Lets round some things for the sake of numerical speed\n om = np.round(p[\"om\"], decimals=5)\n growth = np.round(growth, decimals=5)\n gamma = np.round(gamma, decimals=5)\n\n # Compute the BAO damping/propagator\n propagator = self.get_damping(growth, om, gamma)\n\n # Compute the smooth model\n if self.recon:\n kaiser_prefac = 1.0 + np.outer(growth / p[\"b\"] * self.mu ** 2, 1.0 - self.smoothing_kernel)\n else:\n kaiser_prefac = 1.0 + np.tile(growth / p[\"b\"] * self.mu ** 2, (len(ks), 1)).T\n fog = np.exp(-p[\"A\"] * ks ** 2)\n pk_smooth = p[\"b\"] ** 2 * pk_smooth_lin * fog\n\n # Compute the non-linear correction to the smooth power spectrum\n p_dd, p_dt, p_tt = self.get_nonlinear(growth, om)\n pk_nonlinear = p_dd + p_dt / p[\"b\"] + p_tt / p[\"b\"] ** 2\n\n # Integrate over mu\n if smooth:\n pk1d = integrate.simps(pk_smooth * ((1.0 + 0.0 * pk_ratio * propagator) * kaiser_prefac ** 2 + pk_nonlinear), self.mu, axis=0)\n else:\n pk1d = integrate.simps(pk_smooth * ((1.0 + pk_ratio * propagator) * kaiser_prefac ** 2 + pk_nonlinear), self.mu, axis=0)\n\n pk_final = splev(k / p[\"alpha\"], splrep(ks, pk1d))\n\n return pk_final\n\n\nif __name__ == \"__main__\":\n\n import sys\n import timeit\n from barry.datasets.dataset_power_spectrum import PowerSpectrum_SDSS_DR12_Z061_NGC\n\n sys.path.append(\"../..\")\n logging.basicConfig(level=logging.DEBUG, format=\"[%(levelname)7s |%(funcName)20s] %(message)s\")\n logging.getLogger(\"matplotlib\").setLevel(logging.ERROR)\n\n dataset = PowerSpectrum_SDSS_DR12_Z061_NGC(recon=False)\n data = dataset.get_data()\n model_pre = PowerNoda2019(recon=False)\n model_pre.set_data(data)\n\n 
dataset = PowerSpectrum_SDSS_DR12_Z061_NGC(recon=True)\n data = dataset.get_data()\n model_post = PowerNoda2019(recon=True)\n model_post.set_data(data)\n\n p = {\"om\": 0.3, \"alpha\": 1.0, \"A\": 7.0, \"b\": 1.6, \"gamma\": 4.0}\n for v in np.linspace(1.0, 20, 20):\n p[\"A\"] = v\n print(v, model_post.get_likelihood(p, data[0]))\n\n n = 200\n\n def test_pre():\n model_pre.get_likelihood(p, data[0])\n\n def test_post():\n model_post.get_likelihood(p, data[0])\n\n print(\"Pre-reconstruction likelihood takes on average, %.2f milliseconds\" % (timeit.timeit(test_pre, number=n) * 1000 / n))\n print(\"Post-reconstruction likelihood takes on average, %.2f milliseconds\" % (timeit.timeit(test_post, number=n) * 1000 / n))\n\n if True:\n p, minv = model_pre.optimize()\n print(\"Pre reconstruction optimisation:\")\n print(p)\n print(minv)\n model_pre.plot(p)\n\n print(\"Post reconstruction optimisation:\")\n p, minv = model_post.optimize()\n print(p)\n print(minv)\n model_post.plot(p)\n", "sub_path": "barry/models/bao_power_Noda2019.py", "file_name": "bao_power_Noda2019.py", "file_ext": "py", "file_size_in_byte": 6759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "barry.models.bao_power.PowerSpectrumFit", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 54, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 64, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 77, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 130, "usage_type": "call"}, {"api_name": "scipy.integrate.simps", "line_number": 139, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 139, "usage_type": "name"}, {"api_name": "scipy.integrate.simps", "line_number": 141, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 141, "usage_type": "name"}, {"api_name": "scipy.interpolate.splev", "line_number": 143, "usage_type": "call"}, {"api_name": "scipy.interpolate.splrep", "line_number": 143, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 154, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 155, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 155, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 156, "usage_type": "attribute"}, {"api_name": "barry.datasets.dataset_power_spectrum.PowerSpectrum_SDSS_DR12_Z061_NGC", "line_number": 158, "usage_type": 
"call"}, {"api_name": "barry.datasets.dataset_power_spectrum.PowerSpectrum_SDSS_DR12_Z061_NGC", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 169, "usage_type": "call"}, {"api_name": "timeit.timeit", "line_number": 181, "usage_type": "call"}, {"api_name": "timeit.timeit", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "210196575", "text": "#! /usr/bin/env python3\n\nfrom collections import namedtuple\nimport contextlib\nimport os\nfrom os import path\n\nimport click\nfrom good import Any, Default, Invalid, Optional, Schema\nimport numpy as np\nimport yaml\n\nimport camtrack\nimport cmptrack\nimport corners\nimport data3d\nimport frameseq\n\n\nFramePair = namedtuple('FramePair', ('frame_1', 'frame_2'))\n\n\ndef _check_frame_pair(frame_pair):\n if (not isinstance(frame_pair, list) or\n len(frame_pair) != 2 or\n not all(isinstance(x, int) for x in frame_pair)):\n raise Invalid('Invalid initial frame pair format')\n return FramePair(*frame_pair)\n\n\nDATASET_CONFIG_SCHEMA = Schema({\n 'tests': {\n Any(str): {\n 'camera': str,\n 'ground_truth': str,\n 'rgb': str,\n Optional('initial_frames'): Any(_check_frame_pair, Default(None))\n }\n }\n})\n\n\nTestInfo = namedtuple('TestInfo', (\n 'camera',\n 'ground_truth',\n 'rgb',\n 'initial_frames'\n))\n\n\ndef _create_test_info(camera, ground_truth, rgb, initial_frames=None):\n return TestInfo(camera, ground_truth, rgb, initial_frames)\n\n\ndef read_config(config_path):\n root = path.dirname(path.abspath(config_path))\n with open(config_path, 'r') as config_file:\n raw_config_data = yaml.load(config_file)\n config_data = DATASET_CONFIG_SCHEMA(raw_config_data)\n config = dict()\n for name, info in config_data['tests'].items():\n config[name] = _create_test_info(**{\n k: path.join(root, v) if isinstance(v, str) else v\n for k, v in info.items()\n })\n return config\n\n\ndef _run_and_save_logs(stdout_path, stderr_path, func, *args, **kwargs):\n with open(stdout_path, 'w') as stdout_file:\n with open(stderr_path, 'w') as stderr_file:\n with contextlib.redirect_stdout(stdout_file):\n with contextlib.redirect_stderr(stderr_file):\n result = func(*args, **kwargs)\n return result\n\n\ndef _make_dir_if_needed(dir_path, indent_level=0):\n if not dir_path:\n return\n if not path.exists(dir_path):\n click.echo(\"{}make dir '{}'\".format(' ' * indent_level, dir_path))\n os.mkdir(dir_path)\n\n\ndef _read_camera_parameters(parameters_path):\n with open(parameters_path, 'r') as camera_file:\n return data3d.read_camera_parameters(camera_file)\n\n\ndef _write_poses(track, track_path):\n with open(track_path, 'w') as track_file:\n data3d.write_poses(track, track_file)\n\n\ndef _write_point_cloud(point_cloud, pc_path):\n with open(pc_path, 'w') as pc_file:\n data3d.write_point_cloud(point_cloud, pc_file)\n\n\ndef _read_ground_truth(ground_truth_path):\n with open(ground_truth_path, 'r') as gt_file:\n return data3d.read_poses(gt_file)\n\n\ndef _write_error_measure(error, dst_path):\n with open(dst_path, 'w') as dst_file:\n yaml.dump({'error_measure': float(error)}, dst_file)\n\n\ndef _calc_corners_path(test_name, corners_dir):\n if corners_dir is None:\n return None\n return path.join(corners_dir, test_name + '.pickle')\n\n\ndef _try_to_load_corners(corners_path):\n if not corners_path:\n return None\n if not path.exists(corners_path):\n return None\n with open(corners_path, 'rb') as corners_file:\n return corners.load(corners_file)\n\n\ndef _try_to_dump_corners(corners_path, corner_storage):\n if not corners_path:\n 
return None\n with open(corners_path, 'wb') as corners_file:\n return corners.dump(corner_storage, corners_file)\n\n\ndef _load_or_calculate_corners(grayscale_seq, test_name,\n test_dir, corners_dir):\n corners_path = _calc_corners_path(test_name, corners_dir)\n corner_storage = _try_to_load_corners(corners_path)\n if corner_storage:\n click.echo(\" corners are loaded from '{}'\".format(corners_path))\n return corner_storage\n try:\n click.echo(' start corners tracking')\n corner_storage = _run_and_save_logs(\n path.join(test_dir, 'corners_stdout.txt'),\n path.join(test_dir, 'corners_stderr.txt'),\n corners.build,\n grayscale_seq,\n False\n )\n except Exception as err: # pylint:disable=broad-except\n click.echo(' corners tracking failed: {}'.format(err))\n return None\n else:\n click.echo(' corners tracking succeeded')\n if corners_path:\n _try_to_dump_corners(corners_path, corner_storage)\n click.echo(\" corners are dumped to '{}'\".format(corners_path))\n return corner_storage\n\n\ndef _do_tracking(test_info, ground_truth, corner_storage, test_dir):\n camera_parameters = _read_camera_parameters(test_info.camera)\n if test_info.initial_frames is not None:\n frame_1, frame_2 = test_info.initial_frames\n known_view_1 = (frame_1, ground_truth[frame_1])\n known_view_2 = (frame_2, ground_truth[frame_2])\n else:\n known_view_1 = None\n known_view_2 = None\n try:\n click.echo(' start scene solving')\n track, point_cloud = _run_and_save_logs(\n path.join(test_dir, 'tracking_stdout.txt'),\n path.join(test_dir, 'tracking_stderr.txt'),\n camtrack.track_and_calc_colors,\n camera_parameters,\n corner_storage,\n test_info.rgb,\n known_view_1,\n known_view_2\n )\n except Exception as err: # pylint:disable=broad-except\n click.echo(' scene solving failed: {}'.format(err))\n return None, None\n else:\n click.echo(' scene solving succeeded')\n return track, point_cloud\n\n\ndef run_tests(config, output_dir, corners_dir):\n # pylint:disable=too-many-locals\n\n _make_dir_if_needed(output_dir)\n _make_dir_if_needed(corners_dir)\n\n all_r_errors = []\n all_t_errors = []\n for test_name, test_info in config.items():\n click.echo(test_name)\n\n test_dir = path.join(output_dir, test_name)\n _make_dir_if_needed(test_dir, 1)\n\n grayscale_seq = frameseq.read_grayscale_f32(test_info.rgb)\n\n inf_errors = np.full((len(grayscale_seq),), np.inf)\n all_r_errors.append(inf_errors)\n all_t_errors.append(inf_errors)\n\n corner_storage = _load_or_calculate_corners(grayscale_seq, test_name,\n test_dir, corners_dir)\n if not corner_storage:\n continue\n\n ground_truth = _read_ground_truth(test_info.ground_truth)\n track, point_cloud = _do_tracking(test_info, ground_truth,\n corner_storage, test_dir)\n if not track:\n continue\n\n _write_poses(track, path.join(test_dir, 'track.yml'))\n _write_point_cloud(point_cloud, path.join(test_dir, 'point_cloud.yml'))\n\n r_errors, t_errors = cmptrack.calc_errors(ground_truth, track)\n all_r_errors[-1] = r_errors\n all_t_errors[-1] = t_errors\n click.echo(' rotation error (degrees): median={}, max={}'.format(\n np.median(np.degrees(r_errors)),\n np.degrees(r_errors).max()\n ))\n click.echo(' translation error: median={}, max={}'.format(\n np.median(t_errors),\n t_errors.max()\n ))\n click.echo(' overall error measure: {}'.format(\n cmptrack.calc_vol_under_surface(r_errors, t_errors)\n ))\n\n all_r_errors = np.concatenate(all_r_errors)\n all_t_errors = np.concatenate(all_t_errors)\n error_measure = cmptrack.calc_vol_under_surface(all_r_errors, all_t_errors)\n\n click.echo('total error 
measure: {}'.format(error_measure))\n _write_error_measure(error_measure,\n path.join(output_dir, 'error_measure.yml'))\n\n\n@click.command()\n@click.argument('config_path', type=click.Path(exists=True, dir_okay=False))\n@click.argument('output_dir', type=click.Path(file_okay=False))\n@click.option('--corners-dir', type=click.Path(file_okay=False))\ndef cli(config_path, output_dir, corners_dir):\n config = read_config(config_path)\n run_tests(config, output_dir, corners_dir)\n\n\nif __name__ == '__main__':\n cli() # pylint:disable=no-value-for-parameter\n", "sub_path": "camtrack/testrunner.py", "file_name": "testrunner.py", "file_ext": "py", "file_size_in_byte": 8172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.namedtuple", "line_number": 20, "usage_type": "call"}, {"api_name": "good.Invalid", "line_number": 27, "usage_type": "call"}, {"api_name": "good.Schema", "line_number": 31, "usage_type": "call"}, {"api_name": "good.Any", "line_number": 33, "usage_type": "call"}, {"api_name": "good.Optional", "line_number": 37, "usage_type": "call"}, {"api_name": "good.Any", "line_number": 37, "usage_type": "call"}, {"api_name": "good.Default", "line_number": 37, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 56, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "name"}, {"api_name": "contextlib.redirect_stdout", "line_number": 72, "usage_type": "call"}, {"api_name": "contextlib.redirect_stderr", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 82, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 83, "usage_type": "call"}, {"api_name": "data3d.read_camera_parameters", "line_number": 88, "usage_type": "call"}, {"api_name": "data3d.write_poses", "line_number": 93, "usage_type": "call"}, {"api_name": "data3d.write_point_cloud", "line_number": 98, "usage_type": "call"}, {"api_name": "data3d.read_poses", "line_number": 103, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "name"}, {"api_name": "corners.load", "line_number": 123, "usage_type": "call"}, {"api_name": "corners.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 138, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "name"}, {"api_name": "corners.build", "line_number": 145, "usage_type": "attribute"}, 
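The testrunner record above captures a callable's console output with contextlib redirection (its `_run_and_save_logs` helper). A minimal, self-contained sketch of that same pattern, with illustrative file names not taken from the dataset:

import contextlib


def run_and_capture(stdout_path, stderr_path, func, *args, **kwargs):
    """Run func with stdout/stderr redirected to files; return its result."""
    with open(stdout_path, 'w') as out_file, open(stderr_path, 'w') as err_file:
        # Both streams are restored automatically when the with-blocks exit.
        with contextlib.redirect_stdout(out_file):
            with contextlib.redirect_stderr(err_file):
                return func(*args, **kwargs)


result = run_and_capture('demo_stdout.txt', 'demo_stderr.txt', print, 'hello')

Redirecting at the interpreter level like this also captures output from code the caller does not control, which is why the testrunner wraps whole corner-tracking and scene-solving runs in it.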
{"api_name": "click.echo", "line_number": 150, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 153, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 156, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "name"}, {"api_name": "camtrack.track_and_calc_colors", "line_number": 174, "usage_type": "attribute"}, {"api_name": "click.echo", "line_number": 182, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 185, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "name"}, {"api_name": "frameseq.read_grayscale_f32", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path", "line_number": 221, "usage_type": "name"}, {"api_name": "cmptrack.calc_errors", "line_number": 223, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 228, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 231, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 234, "usage_type": "call"}, {"api_name": "cmptrack.calc_vol_under_surface", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 239, "usage_type": "call"}, {"api_name": "cmptrack.calc_vol_under_surface", "line_number": 240, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path", "line_number": 244, "usage_type": "name"}, {"api_name": "click.command", "line_number": 247, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 248, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 248, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 249, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 249, "usage_type": "call"}, {"api_name": "click.option", "line_number": 250, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "203552289", "text": "from libs.menus import os_creds, list_projects\n\n##\n# OpenStack Libraries\n##\nfrom openstack import connection, profile\nimport npyscreen\n\nclass BackupState:\n @classmethod\n def run(self, log):\n log.debug(\"Inside the backup state classmethod\")\n inst = BackupState(log)\n # Get creds of main install\n inst.get_creds() \\\n .make_connection() \\\n .backup_projects()\n 
###\n # Instance Methods here on\n ###\n def __init__(self, log):\n self._log = log\n self._log.debug(\"Initialised BackupState\")\n\n def get_creds(self):\n (\n self._url,\n self._username,\n self._password,\n self._domain,\n self._project\n ) = os_creds.AskForCredentials.run()\n return self\n\n def make_connection(self):\n self._log.debug(\"About to init connection\")\n auth_args = {\n 'auth_url': self._url,\n 'project_name': self._project,\n 'user_domain_name': self._domain,\n 'project_domain_name': self._domain,\n 'username': self._username,\n 'password': self._password,\n }\n self._conn = connection.Connection(**auth_args)\n self._log.info(\"Connection to %s established!\" % self._url)\n return self\n\n\n def backup_projects(self):\n self._log.info(\"Initialising backup of projects\")\n projects = list(self._conn.identity.projects())\n self._log.debug(\"Asking user for projects\")\n selections = list_projects.ListProjects.run(projects)\n", "sub_path": "source/libs/states/backup.py", "file_name": "backup.py", "file_ext": "py", "file_size_in_byte": 1577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "libs.menus.os_creds.AskForCredentials.run", "line_number": 32, "usage_type": "call"}, {"api_name": "libs.menus.os_creds.AskForCredentials", "line_number": 32, "usage_type": "attribute"}, {"api_name": "libs.menus.os_creds", "line_number": 32, "usage_type": "name"}, {"api_name": "openstack.connection.Connection", "line_number": 45, "usage_type": "call"}, {"api_name": "openstack.connection", "line_number": 45, "usage_type": "name"}, {"api_name": "libs.menus.list_projects.ListProjects.run", "line_number": 54, "usage_type": "call"}, {"api_name": "libs.menus.list_projects.ListProjects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "libs.menus.list_projects", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "109866120", "text": "#!/usr/bin/env python\nimport os\n\nfrom bokeh.models import Range1d\nfrom bokeh.plotting import figure, output_file, show\nfrom dateutil import parser\n\n\ndef graph(data, title, x_label, y_label, legend='', x_xais_type='datetime'):\n\n x = []\n y = []\n\n for datum in data:\n a, b = datum\n\n if x_xais_type == 'datetime':\n a = parser.parse(a)\n\n x.append(a)\n y.append(b)\n\n filename = os.path.join('output', title.lower().replace(' ', '_') + \".html\")\n\n output_file(filename, title=title)\n p = figure(title=title, x_axis_label=x_label, y_axis_label=y_label, x_axis_type=\"datetime\")\n p.line(x, y, legend=legend)\n\n x_min, x_max = x[0], x[-1]\n y_min, y_max = y[0], y[-1]\n\n p.x_range = Range1d(x_min, x_max)\n p.y_range = Range1d(y_min, y_max)\n\n show(p)\n\n", "sub_path": "project_growth/project_graph.py", "file_name": "project_graph.py", "file_ext": "py", "file_size_in_byte": 810, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dateutil.parser.parse", "line_number": 18, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "bokeh.plotting.output_file", "line_number": 25, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 26, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 32, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 
33, "usage_type": "call"}, {"api_name": "bokeh.plotting.show", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "426478586", "text": "from random import seed\nfrom random import randint\nimport sqlite3\n\ndone = False\nlogged_in = False\nmain_input = 0\nsecondary_input = 0\niin = \"400000\"\nchecksum = \"\"\nacct_num = \"\"\nfull_card_num = \"\"\npin = \"\"\nbalance = 0\n\nconn = sqlite3.connect('card.s3db')\nconn.row_factory = sqlite3.Row\ncursor = conn.cursor()\n\ncreate_table = \"CREATE TABLE IF NOT EXISTS card (id INTEGER, number TEXT, pin TEXT, balance INTEGER DEFAULT 0);\"\n\ncursor.execute(create_table)\nconn.commit()\n\n\ndef print_main():\n print(\"1. Create an account\\n2. Log into account\\n0. Exit\")\n return int(input())\n\n\ndef create_acct():\n global acct_num, pin, full_card_num, checksum\n # create card number and pin\n seed()\n acct_num = str(randint(100000000, 999999999))\n pin = str(randint(1000, 9999))\n # set checksum based on Luhn Algo\n num_minus_checksum = iin + acct_num\n num_list = list(map(int, list(num_minus_checksum)))\n number_sum = 0\n for i in range(len(num_list)):\n if i % 2 == 0:\n num = num_list[i] * 2\n if num > 9:\n num -= 9\n num_list[i] = num\n number_sum += num_list[i]\n checksum = str(10 - (number_sum % 10))\n if checksum == \"10\":\n checksum = \"0\"\n full_card_num = num_minus_checksum + checksum\n save_acct_info(full_card_num, pin)\n print(\"\\nYour card has been created\")\n print(\"Your card number:\\n{}\\nYour card PIN:\\n{}\\n\".format(full_card_num, pin))\n\n\ndef check_luhn(num: str):\n\n num_list = list(map(int, list(num[0:len(num) - 1])))\n last_digit = num[-1]\n number_sum = 0\n for i in range(len(num_list)):\n if i % 2 == 0:\n num = num_list[i] * 2\n if num > 9:\n num -= 9\n num_list[i] = num\n number_sum += num_list[i]\n this_checksum = str(10 - (number_sum % 10))\n if this_checksum == \"10\":\n this_checksum = \"0\"\n if this_checksum == last_digit:\n return False\n else:\n return True\n\n\ndef save_acct_info(num, this_pin):\n global conn, cursor\n query = \"insert into card (id,number,pin) values ({},{},{})\".format(randint(1, 100000), num, this_pin)\n cursor.execute(query)\n conn.commit()\n\n\ndef print_secondary():\n print(\"1. Balance\\n2. Add income\\n3. Do transfer\\n4. Close account\\n5. Log out\\n0. 
Exit\")\n return int(input())\n\n\ndef login():\n global acct_num, pin, balance\n\n print(\"\\nEnter your card number:\")\n this_card_num = str(input())\n print(\"Enter your PIN:\")\n this_pin = str(input())\n\n query = \"select * from card where number = {} and pin = {}\".format(this_card_num, this_pin)\n account = cursor.execute(query).fetchone()\n\n if not account:\n print(\"\\nWrong card number or PIN!\\n\")\n return False\n else:\n print(\"\\nYou have successfully logged in!\\n\")\n acct_num = account['number']\n pin = account['pin']\n balance = account['balance']\n return True\n\n\ndef print_balance():\n global balance\n print(\"\\nBalance: {}\\n\".format(balance))\n\n\ndef add_income():\n global balance, acct_num\n print(\"\\nEnter income:\")\n income = int(input())\n balance += income\n query = \"update card set balance = {} where number = {}\".format(balance, acct_num)\n cursor.execute(query)\n conn.commit()\n print(\"Income was added!\\n\")\n\n\ndef transfer():\n global balance, acct_num\n print(\"\\nTransfer\\nEnter card number:\")\n other_acct = str(input())\n count = cursor.execute(\"select count(*) from card where number = {}\".format(other_acct)).fetchone()[0]\n if other_acct == acct_num:\n print(\"You can't transfer money to the same account!\\n\")\n elif check_luhn(other_acct):\n print(\"Probably you made a mistake in the card number. Please try again!\\n\")\n elif count == 0:\n print(\"Such a card does not exist.\\n\")\n else:\n other_acct_balance = cursor.execute(\"select * from card where number = {}\".format(other_acct)).fetchone()['balance']\n # print(\"Other account balance:\", other_acct_balance)\n print(\"Enter how much money you want to transfer:\")\n trans_amt = int(input())\n if trans_amt > balance:\n print(\"Not enough money!\\n\")\n else:\n balance -= trans_amt\n other_acct_balance += trans_amt\n query1 = \"update card set balance = {} where number = {}\".format(balance, acct_num)\n query2 = \"update card set balance = {} where number = {}\".format(other_acct_balance, other_acct)\n cursor.execute(query1)\n cursor.execute(query2)\n conn.commit()\n print(\"Success!\\n\")\n\n\ndef delete_acct():\n global acct_num, logged_in\n query = \"delete from card where number = {}\".format(acct_num)\n cursor.execute(query)\n conn.commit()\n logged_in = False\n print(\"\\nThe account has been closed!\\n\")\n\n\ndef logout():\n global logged_in\n logged_in = False\n print(\"\\nYou have successfully logged out\\n\")\n\n\ndef leave():\n global logged_in, done\n logged_in = False\n done = True\n\n\nwhile not done:\n\n main_input = print_main()\n\n if main_input == 1:\n create_acct()\n elif main_input == 2:\n success = login()\n if success:\n logged_in = True\n while logged_in:\n secondary_input = print_secondary()\n if secondary_input == 1:\n print_balance()\n elif secondary_input == 2:\n add_income()\n elif secondary_input == 3:\n transfer()\n elif secondary_input == 4:\n delete_acct()\n elif secondary_input == 5:\n logout()\n else:\n leave()\n else:\n done = True\n\nprint(\"\\nBye!\")\n", "sub_path": "Simple Banking System/task/banking/banking.py", "file_name": "banking.py", "file_ext": "py", "file_size_in_byte": 5763, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 17, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", 
"line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "277687271", "text": "import sys # the system library for controlling the screen \r\nimport pygame # this library is for creating 2D games\r\nfrom time import sleep\r\n\r\nfrom settings import Settings # using the 'Setting' class we build\r\nfrom ship import Ship\r\nfrom bullet import Bullet \r\nfrom alien import Alien \r\nfrom game_stats import GameStats \r\nfrom button import Button \r\nfrom scoreboard import ScoreBoard \r\nfrom util import save, load\r\n\r\nclass AlienInvasion:\r\n\t# the game class \r\n\tdef __init__(self):\r\n\t\t# class constructor\r\n\t\tpygame.init()\r\n\r\n\t\tself.settings = Settings() # setting instance\r\n\r\n\t\t# manually set the sizes of the frame\r\n\t\tself.screen = pygame.display.set_mode(\r\n\t\t\t(self.settings.screen_width, self.settings.screen_height)) # only takes one argument\r\n\r\n\t\tself.bg_image = pygame.image.load('images/background.png') # this is for setting the background image\r\n\t\tself.bg_image = pygame.transform.scale(self.bg_image, (self.settings.screen_width, self.settings.screen_height))\r\n\t\tself.bg_rect = self.bg_image.get_rect()\r\n\t\tself.bg_rect.midbottom = self.screen.get_rect().midbottom\r\n\r\n\t\t#self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN) # use fullscreen in pygame\r\n\t\t#self.settings.screen_width = self.screen.get_rect().width \r\n\t\t#self.settings.screen_height = self.screen.get_rect().height \r\n\r\n\t\tpygame.display.set_caption(\"Alien Invasion\")\r\n\r\n\t\tself.stats = GameStats(self)\r\n\t\tself.stats.high_score = load()\r\n\t\tself.sb = ScoreBoard(self)\r\n\r\n\t\tself.ship = Ship(self)\r\n\t\tself.bullets = pygame.sprite.Group() # creating a group sprite which is usefull in updating\r\n\t\tself.aliens = pygame.sprite.Group() # creating a group sprite for aliens same as bullets\r\n\r\n\t\tself._create_fleet()\r\n\r\n\t\tself.play_button = Button(self, \"Play\") # the game button\r\n\r\n\tdef _create_fleet(self):\r\n\t\t# this method places the aliens in rows and colls\r\n\t\talien = Alien(self)\r\n\r\n\t\talien_width, alien_height = alien.rect.size # getting the bounds\r\n\t\tavailable_space_x = self.settings.screen_width - (2 * alien_width) # calculations to find the maximum number of aliens in a line\r\n\t\tnumber_alien_x = available_space_x // (3 * alien_width) # the '//' is floor division\r\n\r\n\t\tship_height = self.ship.rect.height\r\n\t\tavailable_space_y = (self.settings.screen_height - 10 * ship_height - (4 * alien_height)) # calculations to find the maximum number of lines in screen\r\n\t\tnumber_alien_y = available_space_y // (2 * alien_height)\r\n\r\n\t\tfor row_number in range(number_alien_y):\r\n\t\t\tfor alien_number in range(number_alien_x): # creating a row of aliens\r\n\t\t\t\tself._create_alien(alien_number, row_number)\r\n\r\n\tdef _check_fleet_edges(self):\r\n\t\t# this method checks the overlapping of alien and walls\r\n\t\tfor alien in self.aliens.sprites():\r\n\t\t\tif alien.check_edges():\r\n\t\t\t\tself._change_fleet_direction()\r\n\t\t\t\tbreak\r\n\r\n\tdef _change_fleet_direction(self):\r\n\t\t# this method make the changes when aliens hit the walls\r\n\t\tfor alien in self.aliens.sprites():\r\n\t\t\talien.rect.y += self.settings.fleet_drop_down\r\n\t\tself.settings.fleet_direction *= -1\t\t\r\n\r\n\tdef _create_alien(self, alien_number, row_number):\r\n\t\t# 
this method creates a single alien \r\n\t\talien = Alien(self)\r\n\t\talien_width, alien_height = alien.rect.size\r\n\t\talien.x = alien_width + 3 * alien_width * alien_number\r\n\t\talien.rect.x = alien.x \r\n\t\talien.rect.y = 3 * alien.rect.height + 2 * alien.rect.height * row_number\r\n\t\tself.aliens.add(alien)\t\t\t\r\n\r\n\tdef run_game(self):\r\n\t\t# a method for starting the game process\r\n\t\t# the game main loop\r\n\t\twhile True:\r\n\t\t\tself._check_events()\r\n\r\n\t\t\tif self.stats.game_active: # check if the game is still on\r\n\t\t\t\tself.ship.update()\r\n\t\t\t\tself._update_bullets()\r\n\t\t\t\tself._update_aliens()\t\r\n\r\n\t\t\tself._update_screen()\r\n\r\n\tdef _check_events(self):\r\n\t\t# a method for getting the keyboard and mouse events\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tsave(self.stats.high_score)\r\n\t\t\t\tsys.exit()\t\r\n\t\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\t\tself._check_keydown_events(event)\t\r\n\t\t\telif event.type == pygame.KEYUP:\r\n\t\t\t\tself._check_keyup_events(event)\r\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\r\n\t\t\t\tself._check_play_button(mouse_pos)\t\r\n\t\t\t\t\r\n\tdef _check_keydown_events(self, event):\r\n\t\t# this method handels the pressing keys\r\n\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\tself.ship.moving_right = True\r\n\t\tif event.key == pygame.K_LEFT:\r\n\t\t\tself.ship.moving_left = True\r\n\t\tif event.key == pygame.K_q:\r\n\t\t\tsave(self.stats.high_score)\r\n\t\t\tsys.exit()\t\r\n\t\tif event.key == pygame.K_SPACE:\r\n\t\t\tself._fire_bullet()\r\n\t\tif event.key == pygame.K_p:\r\n\t\t\tself._do_play_click()\r\n\t\t\t\t\r\n\tdef _check_keyup_events(self, event):\t\r\n\t\t# this method handels the released keys\t\t\t\t\t\t\t\t\r\n\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\tself.ship.moving_right = False\r\n\t\tif event.key == pygame.K_LEFT:\r\n\t\t\tself.ship.moving_left = False\r\n\r\n\tdef _fire_bullet(self):\r\n\t\t# this method creates and adds new bullet to the bullets list\r\n\t\tif len(self.bullets) < self.settings.bullets_allowed: # a limit for bullets added\r\n\t\t\tnew_bullet = Bullet(self)\r\n\t\t\tself.bullets.add(new_bullet)\r\n\r\n\tdef _check_play_button(self, mouse_pos):\r\n\t\t# this method checks the mouse position and the button position\r\n\t\tif self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\r\n\t\t\tself._do_play_click()\r\n\r\n\tdef _do_play_click(self):\r\n\t\t# this method gets the player in the game\r\n\t\tself.stats.reset_stats()\r\n\t\tself.settings.initialize_dynamic_settings()\r\n\t\tself.aliens.empty()\r\n\t\tself.bullets.empty()\r\n\t\tself._create_fleet()\r\n\t\tself.sb.prep_score()\r\n\t\tself.sb.prep_level()\r\n\t\tself.sb.prep_ships()\r\n\t\tself.ship.center_ship()\r\n\t\tself.play_button.button_color = (0, 100, 0) # mouse hovering\r\n\t\tself.play_button._preg_msg('Play')\r\n\t\tself._update_screen()\r\n\t\tsleep(0.5) # a short break\r\n\t\tself.stats.game_active = True\t\r\n\t\tpygame.mouse.set_visible(False) # not showing the mouse\t\t\r\n\r\n\tdef _update_bullets(self):\r\n\t\t# this method will update the bullets\r\n\t\tself.bullets.update()\r\n\t\tfor bullet in self.bullets.copy(): # we use the copy method to create an other list for iteration\r\n\t\t\tif bullet.rect.bottom < 1:\r\n\t\t\t\tself.bullets.remove(bullet)\r\n\r\n\t\tself._check_alien_bullet_collision()\t\t\r\n\r\n\tdef 
_check_alien_bullet_collision(self):\r\n\t\t'''\r\n\t\tThe method bellow will get two groups and will check for any overlapping between\r\n\t\tthe elements in each group. If so it will remove them from their lists.\r\n\t\t'''\t\t\r\n\t\tcollisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)\r\n\r\n\t\tif collisions:\r\n\t\t\tfor aliens in collisions.values():\r\n\t\t\t\tself.stats.score += self.settings.alien_point * len(aliens)\r\n\t\t\tself.sb.prep_score()\t\r\n\t\t\tself.sb.check_high_score()\t\r\n\r\n\t\tif not self.aliens: # this is for recreating aliens when all are dead\r\n\t\t\tself.bullets.empty()\t\r\n\t\t\tself._create_fleet()\r\n\t\t\tself.settings.increase_speed()\r\n\r\n\t\t\tself.stats.level += 1\r\n\t\t\tself.sb.prep_level()\r\n\r\n\tdef _check_aliens_bottom(self):\r\n\t\t# this method checks if the aliens have reached the bottom of the screen\r\n\t\tscreen_rect = self.screen.get_rect()\r\n\t\tfor alien in self.aliens.sprites().copy():\r\n\t\t\tif alien.rect.bottom >= screen_rect.bottom:\r\n\t\t\t\tself._ship_hit()\r\n\t\t\t\tbreak\r\n\r\n\tdef _ship_hit(self):\r\n\t\t# this method is for restarting a game and make the changes\r\n\t\tif self.stats.ships_left > 0:\r\n\t\t\tself.stats.ships_left -= 1\r\n\t\t\tself.sb.prep_ships()\r\n\r\n\t\t\tself.aliens.empty()\r\n\t\t\tself.bullets.empty()\r\n\r\n\t\t\tself._create_fleet()\r\n\t\t\tself.ship.center_ship()\t\t\r\n\r\n\t\t\tsleep(1) # a puse for the user\t\r\n\t\telse:\r\n\t\t\tself.stats.game_active = False\r\n\t\t\tpygame.mouse.set_visible(True)\t\r\n\t\t\tself.play_button.button_color = (0, 255, 0)\r\n\t\t\tself.play_button._preg_msg('Play')\r\n\r\n\tdef _update_aliens(self):\r\n\t\t# this method updates the aliens movements\r\n\t\tself._check_fleet_edges()\t\r\n\t\tself.aliens.update()\r\n\r\n\t\tif pygame.sprite.spritecollideany(self.ship, self.aliens):\r\n\t\t\tself._ship_hit()\r\n\r\n\t\tself._check_aliens_bottom()\t\t\t\r\n\r\n\tdef _update_screen(self):\r\n\t\t# this method creates and shows the game screen to the user\t\r\n\t\tself.screen.blit(self.bg_image, self.bg_rect) # drawing the background image\r\n\t\tself.ship.blitme()\r\n\t\tfor bullet in self.bullets.sprites(): # group object only has update method which we overrode it\r\n\t\t\tbullet.draw_bullet()\r\n\t\tself.aliens.draw(self.screen) # drawing the aliens on the screen\r\n\t\tself.sb.show_score()\r\n\t\tif not self.stats.game_active:\r\n\t\t\tself.play_button.draw_button()\r\n\r\n\t\tpygame.display.flip() # making the frame visible\t\r\n\t\t\r\n\t\t\t\r\nif __name__ == '__main__': # creating an instance and start\r\n\tai = AlienInvasion()\r\n\tai.run_game()\t\t\t\t", "sub_path": "alien_invasion.py", "file_name": "alien_invasion.py", "file_ext": "py", "file_size_in_byte": 8520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 18, "usage_type": "call"}, {"api_name": "settings.Settings", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"pygame.display.set_caption", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 35, "usage_type": "attribute"}, {"api_name": "game_stats.GameStats", "line_number": 37, "usage_type": "call"}, {"api_name": "util.load", "line_number": 38, "usage_type": "call"}, {"api_name": "scoreboard.ScoreBoard", "line_number": 39, "usage_type": "call"}, {"api_name": "ship.Ship", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 43, "usage_type": "attribute"}, {"api_name": "button.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "alien.Alien", "line_number": 51, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 53, "usage_type": "attribute"}, {"api_name": "alien.check_edges", "line_number": 68, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 75, "usage_type": "attribute"}, {"api_name": "alien.Alien", "line_number": 80, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 81, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 82, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 83, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 83, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 103, "usage_type": "attribute"}, {"api_name": "util.save", "line_number": 104, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 120, "usage_type": "attribute"}, {"api_name": "util.save", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.K_SPACE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.K_p", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 132, "usage_type": "attribute"}, {"api_name": "bullet.Bullet", "line_number": 138, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.mouse.set_visible", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 162, "usage_type": "attribute"}, {"api_name": "bullet.rect", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 178, "usage_type": "attribute"}, 
{"api_name": "alien.rect", "line_number": 198, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 214, "usage_type": "call"}, {"api_name": "pygame.mouse.set_visible", "line_number": 217, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 226, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 226, "usage_type": "attribute"}, {"api_name": "bullet.draw_bullet", "line_number": 236, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 242, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 242, "usage_type": "attribute"}]} +{"seq_id": "436678534", "text": "# Copyright 2011-2021 Doug Latornell and The University of British Columbia\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Driver module for SoG-bloomcast project\n\"\"\"\nfrom copy import copy\nimport datetime\nimport logging\nimport logging.handlers\nimport math\nimport os\nimport subprocess\nimport sys\nimport time\nimport arrow\nimport numpy as np\nfrom matplotlib.dates import (\n date2num,\n DateFormatter,\n DayLocator,\n HourLocator,\n MonthLocator,\n)\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nimport SOGcommand\nfrom .meteo import MeteoProcessor\nfrom .rivers import RiversProcessor\nfrom .utils import (\n Config,\n SOG_HoffmuellerProfile,\n SOG_Timeseries,\n)\nfrom .wind import WindProcessor\n\n# Bloom peak identification parameters based on:\n\n# Allen & Wolfe, 2013 [1]:\n\n# \"Although the idea of a spring bloom is well-defined, the exact\n# timing of a real spring bloom is not.\n# In Collins, et al, 2009 [2] the peak of the bloom was defined as the\n# highest concentration of phytoplankton unless an earlier bloom\n# (more than 5 days earlier) was associated with nitrate going to zero.\n# Gower, et al, 2013 [3],\n# using satellite data,\n# chooses a measure of the start of the bloom as the time when the\n# whole Strait of Georgia has high chlorophyll.\n# The nutritional quality of the phytoplankton appears to change when\n# they become nutrient limited Sastri & Dower, 2009 [4].\n# Thus here we use a definition that should delineate between nutrient\n# replete spring conditions and nutrient stressed summer conditions.\n# We use the peak phytoplankton concentration\n# (averaged from the surface to 3 m depth)\n# within four days of the average 0-3 m nitrate concentration going\n# below 0.5 uM (the half-saturation concentration) for two consecutive\n# days.\"\n\n# [1] Allen, S. E. and M. A. Wolfe,\n# Hindcast of the Timing of the Spring Phytoplankton Bloom in the Strait\n# of Georgia, 1968-2010.\n# Progress in Oceanography, vol 115 (2013), pp. 6-13.\n# http://dx.doi.org/10.1016/j.pocean.2013.05.026\n\n# [2] A.K. Collins, S.E. Allen, R. 
Pawlowicz,\n# The role of wind in determining the timing of the spring bloom in the\n# Strait of Georgia.\n# Canadian Journal of Fisheries and Aquatic Sciences, 66 (2009),\n# pp. 1597–1616.\n# http://dx.doi.org/10.1139/F09-071\n\n# [3] Gower, J., King, S., Statham, S., Fox, R., Young, E.,\n# The Malaspina Dragon: a new pattern of the early spring bloom in the\n# Strait of Georgia.\n# Progress in Oceanography 115 (2013), pp. 181–188.\n# http://dx.doi.org/10.1016/j.pocean.2013.05.024\n\n# [4] A.R. Sastri and J.F. Dower,\n# Interannual variability in chitobiase-based production rates of the\n# crustacean zooplankton community in the Strait of Georgia,\n# British Columbia, Canada.\n# Marine Ecology-Progress Series, 388 (2009), pp. 147–157.\n# http://dx.doi.org/10.3354/meps08111\nNITRATE_HALF_SATURATION_CONCENTRATION = 0.5 # uM\nPHYTOPLANKTON_PEAK_WINDOW_HALF_WIDTH = 4 # days\n\n\nlog = logging.getLogger('bloomcast')\nbloom_date_log = logging.getLogger('bloomcast.bloom_date')\n\n\nclass NoNewWindData(Exception):\n pass\n\n\nclass Bloomcast(object):\n \"\"\"Strait of Georgia spring diatom bloom predictor.\n\n :arg config_file: Path for the bloomcast configuration file.\n :type config_file: string\n \"\"\"\n # Colours for graph lines\n nitrate_colours = {'avg': '#30b8b8', 'bounds': '#82dcdc'}\n diatoms_colours = {'avg': 'green', 'bounds': '#56c056'}\n temperature_colours = {'avg': 'red', 'bounds': '#ff7373'}\n salinity_colours = {'avg': 'blue', 'bounds': '#7373ff'}\n\n def __init__(self, config_file, data_date):\n self.config = Config()\n self.config.load_config(config_file)\n # Wind data date for development and debugging; overwritten if\n # wind forcing data is collected and processed\n self.config.data_date = data_date\n\n def run(self):\n \"\"\"Execute the bloomcast prediction and report its results.\n\n * Load the process configuration data.\n\n * Get the wind forcing data.\n\n * Get the meteorological and river flow forcing data.\n\n * Run the SOG code.\n\n * Calculate the spring diatom bloom date.\n \"\"\"\n self._configure_logging()\n if not self.config.get_forcing_data and self.config.data_date is None:\n log.debug(\n 'This will not end well: '\n 'get_forcing_data={0.get_forcing_data} '\n 'and data_date={0.data_date}'.format(self.config))\n return\n log.debug('run start date/time is {0:%Y-%m-%d %H:%M:%S}'\n .format(self.config.run_start_date))\n # Check run start date and current date to ensure that\n # river flow data are available.\n # River flow data are only available in a rolling 18-month window.\n run_start_yr_jan1 = (\n arrow.get(self.config.run_start_date).replace(month=1, day=1))\n river_date_limit = arrow.now().replace(months=-18)\n if run_start_yr_jan1 < river_date_limit:\n log.error(\n 'A bloomcast run starting {0.run_start_date:%Y-%m-%d} cannot '\n 'be done today because there are no river flow data availble '\n 'prior to {1}'\n .format(self.config, river_date_limit.format('YYYY-MM-DD')))\n return\n try:\n self._get_forcing_data()\n except NoNewWindData:\n log.info('Wind data date {0:%Y-%m-%d} is unchanged since last run'\n .format(self.config.data_date))\n return\n self._run_SOG()\n self._get_results_timeseries()\n self._create_timeseries_graphs()\n self._get_results_profiles()\n self._create_profile_graphs()\n self._calc_bloom_date()\n\n def _configure_logging(self):\n \"\"\"Configure logging of debug & warning messages to console\n and email.\n\n Debug logging on/off & email recipient(s) for warning messages\n are set in config file.\n \"\"\"\n 
log.setLevel(logging.DEBUG)\n\n def patched_data_filter(record):\n if (record.funcName == 'patch_data'\n and 'data patched' in record.msg):\n return 0\n return 1\n\n console = logging.StreamHandler()\n console.setFormatter(\n logging.Formatter('%(levelname)s:%(name)s:%(message)s'))\n console.setLevel(logging.INFO)\n if self.config.logging.debug:\n console.setLevel(logging.DEBUG)\n console.addFilter(patched_data_filter)\n log.addHandler(console)\n\n disk = logging.handlers.RotatingFileHandler(\n self.config.logging.bloomcast_log_filename, maxBytes=1024 * 1024)\n disk.setFormatter(\n logging.Formatter(\n '%(asctime)s %(levelname)s [%(name)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M'))\n disk.setLevel(logging.DEBUG)\n log.addHandler(disk)\n\n mailhost = (('localhost', 1025) if self.config.logging.use_test_smtpd\n else 'smtp.eos.ubc.ca')\n email = logging.handlers.SMTPHandler(\n mailhost, fromaddr='SoG-bloomcast@eos.ubc.ca',\n toaddrs=self.config.logging.toaddrs,\n subject='Warning Message from SoG-bloomcast',\n timeout=10.0,\n )\n email.setFormatter(\n logging.Formatter('%(levelname)s:%(name)s:%(message)s'))\n email.setLevel(logging.WARNING)\n log.addHandler(email)\n\n bloom_date_evolution = logging.FileHandler(\n self.config.logging.bloom_date_log_filename)\n bloom_date_evolution.setFormatter(logging.Formatter('%(message)s'))\n bloom_date_evolution.setLevel(logging.INFO)\n bloom_date_log.addHandler(bloom_date_evolution)\n bloom_date_log.propagate = False\n\n def _get_forcing_data(self):\n \"\"\"Collect and process forcing data.\n \"\"\"\n if not self.config.get_forcing_data:\n log.info('Skipped collection and processing of forcing data')\n return\n wind = WindProcessor(self.config)\n self.config.data_date = wind.make_forcing_data_file()\n log.info('based on wind data forcing data date is {}'\n .format(self.config.data_date.format('YYYY-MM-DD')))\n try:\n with open('wind_data_date', 'rt') as f:\n last_data_date = arrow.get(f.readline().strip()).date()\n except IOError:\n # Fake a wind data date to get things rolling\n last_data_date = self.config.run_start_date.date()\n if self.config.data_date == last_data_date:\n raise NoNewWindData\n else:\n with open('wind_data_date', 'wt') as f:\n f.write(\n '{}\\n'.format(self.config.data_date.format('YYYY-MM-DD')))\n meteo = MeteoProcessor(self.config)\n meteo.make_forcing_data_files()\n rivers = RiversProcessor(self.config)\n rivers.make_forcing_data_files()\n\n def _run_SOG(self):\n \"\"\"Run SOG.\n \"\"\"\n if not self.config.run_SOG:\n log.info('Skipped running SOG')\n return\n processes = {}\n base_infile = self.config.infiles['base']\n for key in self.config.infiles['edits']:\n proc = SOGcommand.api.run(\n self.config.SOG_executable,\n base_infile,\n self.config.infiles['edits'][key],\n key + '.stdout')\n processes[key] = proc\n log.info('SOG {0} run started at {1:%Y-%m-%d %H:%M:%S} as pid {2}'\n .format(key, datetime.datetime.now(), proc.pid))\n while processes:\n time.sleep(30)\n for key, proc in copy(processes).items():\n if proc.poll() is None:\n continue\n else:\n processes.pop(key)\n log.info('SOG {0} run finished at {1:%Y-%m-%d %H:%M:%S}'\n .format(key, datetime.datetime.now()))\n\n def _get_results_timeseries(self):\n \"\"\"Read SOG results time series of interest and create\n SOG_Timeseries objects from them.\n \"\"\"\n self.nitrate, self.diatoms = {}, {}\n self.temperature, self.salinity = {}, {}\n self.mixing_layer_depth = {}\n for key in self.config.infiles['edits']:\n std_bio_ts_outfile = self.config.std_bio_ts_outfiles[key]\n 
std_phys_ts_outfile = self.config.std_phys_ts_outfiles[key]\n self.nitrate[key] = SOG_Timeseries(std_bio_ts_outfile)\n self.nitrate[key].read_data(\n 'time', '3 m avg nitrate concentration')\n self.nitrate[key].calc_mpl_dates(self.config.run_start_date)\n self.diatoms[key] = SOG_Timeseries(std_bio_ts_outfile)\n self.diatoms[key].read_data(\n 'time', '3 m avg micro phytoplankton biomass')\n self.diatoms[key].calc_mpl_dates(self.config.run_start_date)\n self.temperature[key] = SOG_Timeseries(std_phys_ts_outfile)\n self.temperature[key].read_data('time', '3 m avg temperature')\n self.temperature[key].calc_mpl_dates(self.config.run_start_date)\n self.salinity[key] = SOG_Timeseries(std_phys_ts_outfile)\n self.salinity[key].read_data('time', '3 m avg salinity')\n self.salinity[key].calc_mpl_dates(self.config.run_start_date)\n self.mixing_layer_depth[key] = SOG_Timeseries(std_phys_ts_outfile)\n self.mixing_layer_depth[key].read_data(\n 'time', 'mixing layer depth')\n self.mixing_layer_depth[key].calc_mpl_dates(\n self.config.run_start_date)\n\n def _create_timeseries_graphs(self):\n \"\"\"Create time series graph objects.\n \"\"\"\n self.fig_nitrate_diatoms_ts = self._two_axis_timeseries(\n self.nitrate, self.diatoms,\n titles=('3 m Avg Nitrate Concentration [uM N]',\n '3 m Avg Diatom Biomass [uM N]'),\n colors=(self.nitrate_colours, self.diatoms_colours))\n self.fig_temperature_salinity_ts = self._two_axis_timeseries(\n self.temperature, self.salinity,\n titles=('3 m Avg Temperature [deg C]',\n '3 m Avg Salinity [-]'),\n colors=(self.temperature_colours, self.salinity_colours))\n self.fig_mixing_layer_depth_ts = self._mixing_layer_depth_timeseries()\n\n def _two_axis_timeseries(self, left_ts, right_ts, titles, colors):\n \"\"\"Create a time series graph figure object with 2 time series\n plotted on the left and right y axes.\n \"\"\"\n fig = Figure((8, 3), facecolor='white')\n ax_left = fig.add_subplot(1, 1, 1)\n ax_left.set_position((0.125, 0.1, 0.775, 0.75))\n fig.ax_left = ax_left\n ax_right = ax_left.twinx()\n ax_right.set_position(ax_left.get_position())\n predicate = (left_ts['avg_forcing'].mpl_dates\n >= date2num(self.config.data_date))\n for key in 'early_bloom_forcing late_bloom_forcing'.split():\n ax_left.plot(left_ts[key].mpl_dates[predicate],\n left_ts[key].dep_data[predicate],\n color=colors[0]['bounds'])\n ax_right.plot(right_ts[key].mpl_dates[predicate],\n right_ts[key].dep_data[predicate],\n color=colors[1]['bounds'])\n ax_left.plot(left_ts['avg_forcing'].mpl_dates,\n left_ts['avg_forcing'].dep_data,\n color=colors[0]['avg'])\n ax_right.plot(right_ts['avg_forcing'].mpl_dates,\n right_ts['avg_forcing'].dep_data,\n color=colors[1]['avg'])\n ax_left.set_ylabel(titles[0], color=colors[0]['avg'], size='x-small')\n ax_right.set_ylabel(titles[1], color=colors[1]['avg'], size='x-small')\n # Add line to mark switch from actual to averaged forcing data\n fig.data_date_line = ax_left.axvline(\n date2num(self.config.data_date), color='black')\n # Format x-axis\n ax_left.xaxis.set_major_locator(MonthLocator())\n ax_left.xaxis.set_major_formatter(DateFormatter('%j\\n%b'))\n for axis in (ax_left, ax_right):\n for label in axis.get_xticklabels() + axis.get_yticklabels():\n label.set_size('x-small')\n ax_left.set_xlim(\n (int(left_ts['avg_forcing'].mpl_dates[0]),\n math.ceil(left_ts['avg_forcing'].mpl_dates[-1])))\n ax_left.set_xlabel(\n 'Year-days in {0} and {1}'\n .format(self.config.run_start_date.year,\n self.config.run_start_date.year + 1),\n size='x-small')\n return fig\n\n def 
_mixing_layer_depth_timeseries(self):\n \"\"\"Create a time series graph figure object of the mixing\n layer depth on the wind data date and the 6 days preceding it.\n \"\"\"\n fig = Figure((8, 3), facecolor='white')\n ax = fig.add_subplot(1, 1, 1)\n ax.set_position((0.125, 0.1, 0.775, 0.75))\n predicate = np.logical_and(\n self.mixing_layer_depth['avg_forcing'].mpl_dates\n > date2num(self.config.data_date - datetime.timedelta(days=6)),\n self.mixing_layer_depth['avg_forcing'].mpl_dates\n <= date2num(self.config.data_date + datetime.timedelta(days=1)))\n mpl_dates = self.mixing_layer_depth['avg_forcing'].mpl_dates[predicate]\n dep_data = self.mixing_layer_depth['avg_forcing'].dep_data[predicate]\n ax.plot(mpl_dates, dep_data, color='magenta')\n ax.set_ylabel(\n 'Mixing Layer Depth [m]', color='magenta', size='x-small')\n # Add line to mark profile time\n profile_datetime = datetime.datetime.combine(\n self.config.data_date, datetime.time(12))\n profile_datetime_line = ax.axvline(\n date2num(profile_datetime), color='black')\n ax.xaxis.set_major_locator(DayLocator())\n ax.xaxis.set_major_formatter(DateFormatter('%j\\n%d-%b'))\n ax.xaxis.set_minor_locator(HourLocator(interval=6))\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_size('x-small')\n ax.set_xlim((int(mpl_dates[0]), math.ceil(mpl_dates[-1])))\n ax.set_xlabel('Year-Day', size='x-small')\n fig.legend(\n [profile_datetime_line], ['Profile Time'],\n loc='upper right', prop={'size': 'xx-small'})\n return fig\n\n def _get_results_profiles(self):\n \"\"\"Read SOG results profiles of interest and create\n SOG_HoffmuellerProfile objects from them.\n \"\"\"\n self.nitrate_profile, self.diatoms_profile = {}, {}\n self.temperature_profile, self.salinity_profile = {}, {}\n for key in self.config.infiles['edits']:\n Hoffmueller_outfile = (\n self.config.Hoffmueller_profiles_outfiles[key])\n profile_number = (\n self.config.data_date - self.config.run_start_date.date()).days\n self.nitrate_profile[key] = SOG_HoffmuellerProfile(\n Hoffmueller_outfile)\n self.nitrate_profile[key].read_data(\n 'depth', 'nitrate', profile_number)\n self.diatoms_profile[key] = SOG_HoffmuellerProfile(\n Hoffmueller_outfile)\n self.diatoms_profile[key].read_data(\n 'depth', 'micro phytoplankton', profile_number)\n self.temperature_profile[key] = SOG_HoffmuellerProfile(\n Hoffmueller_outfile)\n self.temperature_profile[key].read_data(\n 'depth', 'temperature', profile_number)\n self.salinity_profile[key] = SOG_HoffmuellerProfile(\n Hoffmueller_outfile)\n self.salinity_profile[key].read_data(\n 'depth', 'salinity', profile_number)\n\n def _create_profile_graphs(self):\n \"\"\"Create profile graph objects.\n \"\"\"\n profile_datetime = datetime.datetime.combine(\n self.config.data_date, datetime.time(12))\n profile_dt = profile_datetime - self.config.run_start_date\n profile_hour = profile_dt.days * 24 + profile_dt.seconds / 3600\n self.mixing_layer_depth['avg_forcing'].boolean_slice(\n self.mixing_layer_depth['avg_forcing'].indep_data >= profile_hour)\n mixing_layer_depth = self.mixing_layer_depth['avg_forcing'].dep_data[0]\n self.fig_temperature_salinity_profile = self._two_axis_profile(\n self.temperature_profile['avg_forcing'],\n self.salinity_profile['avg_forcing'],\n mixing_layer_depth,\n titles=('Temperature [deg C]', 'Salinity [-]'),\n colors=(self.temperature_colours, self.salinity_colours),\n limits=((4, 10), (20, 30)))\n self.fig_nitrate_diatoms_profile = self._two_axis_profile(\n self.nitrate_profile['avg_forcing'],\n 
self.diatoms_profile['avg_forcing'],\n            mixing_layer_depth,\n            titles=('Nitrate Concentration [uM N]', 'Diatom Biomass [uM N]'),\n            colors=(self.nitrate_colours, self.diatoms_colours))\n\n    def _two_axis_profile(self, top_profile, bottom_profile,\n                          mixing_layer_depth, titles, colors, limits=None):\n        \"\"\"Create a profile graph figure object with 2 profiles\n        plotted on the top and bottom x axes.\n        \"\"\"\n        fig = Figure((4, 8), facecolor='white')\n        ax_bottom = fig.add_subplot(1, 1, 1)\n        ax_bottom.set_position((0.19, 0.1, 0.5, 0.8))\n        ax_top = ax_bottom.twiny()\n        ax_top.set_position(ax_bottom.get_position())\n        ax_top.plot(\n            top_profile.dep_data, top_profile.indep_data,\n            color=colors[0]['avg'])\n        ax_top.set_xlabel(titles[0], color=colors[0]['avg'], size='small')\n        ax_bottom.plot(bottom_profile.dep_data, bottom_profile.indep_data,\n                       color=colors[1]['avg'])\n        ax_bottom.set_xlabel(titles[1], color=colors[1]['avg'], size='small')\n        for axis in (ax_bottom, ax_top):\n            for label in axis.get_xticklabels() + axis.get_yticklabels():\n                label.set_size('x-small')\n        if limits is not None:\n            ax_top.set_xlim(limits[0])\n            ax_bottom.set_xlim(limits[1])\n        ax_bottom.axhline(mixing_layer_depth, color='black')\n        ax_bottom.text(\n            x=ax_bottom.get_xlim()[1], y=mixing_layer_depth,\n            s=' Mixing Layer\\n Depth = {0:.2f} m'.format(mixing_layer_depth),\n            verticalalignment='center', size='small')\n        ax_bottom.set_ylim(\n            (bottom_profile.indep_data[-1], bottom_profile.indep_data[0]))\n        ax_bottom.set_ylabel('Depth [m]', size='small')\n        return fig\n\n    def _calc_bloom_date(self):\n        \"\"\"Calculate the predicted spring bloom date.\n        \"\"\"\n        key = 'avg_forcing'\n        self.bloom_date, self.bloom_biomass = {}, {}\n        for key in self.config.infiles['edits']:\n            self._clip_results_to_jan1(key)\n            self._reduce_results_to_daily(key)\n            first_low_nitrate_days = self._find_low_nitrate_days(\n                key, NITRATE_HALF_SATURATION_CONCENTRATION)\n            self._find_phytoplankton_peak(\n                key, first_low_nitrate_days,\n                PHYTOPLANKTON_PEAK_WINDOW_HALF_WIDTH)\n        if self.config.get_forcing_data or self.config.run_SOG:\n            line = (' {0} {1} {2:.4f}'\n                    .format(self.config.data_date.format('YYYY-MM-DD'),\n                            self.bloom_date['avg_forcing'],\n                            self.bloom_biomass['avg_forcing']))\n            for key in 'early_bloom_forcing late_bloom_forcing'.split():\n                line += (' {0} {1:.4f}'\n                         .format(self.bloom_date[key],\n                                 self.bloom_biomass[key]))\n            bloom_date_log.info(line)\n\n    def _clip_results_to_jan1(self, key):\n        \"\"\"Clip the nitrate concentration and diatom biomass results\n        so that they start on 1-Jan of the bloom year.\n        \"\"\"\n        jan1 = datetime.datetime(self.config.run_start_date.year + 1, 1, 1)\n        discard_hours = jan1 - self.config.run_start_date\n        discard_hours = discard_hours.days * 24 + discard_hours.seconds / 3600\n        predicate = self.nitrate[key].indep_data >= discard_hours\n        self.nitrate[key].boolean_slice(predicate)\n        self.diatoms[key].boolean_slice(predicate)\n\n    def _reduce_results_to_daily(self, key):\n        \"\"\"Reduce the nitrate concentration and diatom biomass results\n        to daily values.\n\n        Nitrate concentrations are daily minimum values.\n\n        Diatom biomasses are daily maximum values.\n\n        Independent data values are dates.\n        \"\"\"\n        # Assume that there are an integral number of SOG time steps in a\n        # day\n        day_slice = 86400 // self.config.SOG_timestep\n        day_iterator = range(\n            0, self.nitrate[key].dep_data.shape[0] - day_slice, day_slice)\n        jan1 = datetime.date(self.config.run_start_date.year + 1, 1, 1)\n        self.nitrate[key].dep_data = np.array(\n            [self.nitrate[key].dep_data[i:i + 
day_slice].min()\n             for i in day_iterator])\n        self.nitrate[key].indep_data = np.array(\n            [jan1 + datetime.timedelta(days=i)\n             for i in range(self.nitrate[key].dep_data.size)])\n        day_iterator = range(\n            0, self.diatoms[key].dep_data.shape[0] - day_slice, day_slice)\n        self.diatoms[key].dep_data = np.array(\n            [self.diatoms[key].dep_data[i:i + day_slice].max()\n             for i in day_iterator])\n        self.diatoms[key].indep_data = np.array(\n            [jan1 + datetime.timedelta(days=i)\n             for i in range(self.diatoms[key].dep_data.size)])\n\n    def _find_low_nitrate_days(self, key, threshold):\n        \"\"\"Return the start and end dates of the first 2-day period in\n        which the nitrate concentration is below the ``threshold``.\n        \"\"\"\n        key_string = key.replace('_', ' ')\n        self.nitrate[key].boolean_slice(\n            self.nitrate[key].dep_data <= threshold)\n        log.debug('Dates on which nitrate was <= {0} uM N with {1}:\\n{2}'\n                  .format(threshold, key_string, self.nitrate[key].indep_data))\n        log.debug('Nitrate <= {0} uM N with {1}:\\n{2}'\n                  .format(threshold, key_string, self.nitrate[key].dep_data))\n        for i in range(self.nitrate[key].dep_data.shape[0]):\n            low_nitrate_day_1 = self.nitrate[key].indep_data[i]\n            days = self.nitrate[key].indep_data[i + 1] - low_nitrate_day_1\n            if days == datetime.timedelta(days=1):\n                low_nitrate_day_2 = self.nitrate[key].indep_data[i + 1]\n                break\n        return low_nitrate_day_1, low_nitrate_day_2\n\n    def _find_phytoplankton_peak(self, key, first_low_nitrate_days,\n                                 peak_half_width):\n        \"\"\"Return the date within ``peak_half_width`` of the\n        ``first_low_nitrate_days`` on which the diatom biomass is the\n        greatest.\n        \"\"\"\n        key_string = key.replace('_', ' ')\n        half_width_days = datetime.timedelta(days=peak_half_width)\n        early_bloom_date = first_low_nitrate_days[0] - half_width_days\n        late_bloom_date = first_low_nitrate_days[1] + half_width_days\n        log.debug('Bloom window for {0} is between {1} and {2}'\n                  .format(key_string, early_bloom_date, late_bloom_date))\n        self.diatoms[key].boolean_slice(\n            self.diatoms[key].indep_data >= early_bloom_date)\n        self.diatoms[key].boolean_slice(\n            self.diatoms[key].indep_data <= late_bloom_date)\n        log.debug('Dates in {0} bloom window:\\n{1}'\n                  .format(key_string, self.diatoms[key].indep_data))\n        log.debug('Micro phytoplankton biomass values in '\n                  '{0} bloom window:\\n{1}'\n                  .format(key_string, self.diatoms[key].dep_data))\n        bloom_date_index = self.diatoms[key].dep_data.argmax()\n        self.bloom_date[key] = self.diatoms[key].indep_data[bloom_date_index]\n        self.bloom_biomass[key] = self.diatoms[key].dep_data[bloom_date_index]\n        log.info('Predicted {0} bloom date is {1}'\n                 .format(key_string, self.bloom_date[key]))\n        log.debug(\n            'Phytoplankton biomass on {0} bloom date is {1} uM N'\n            .format(key_string, self.bloom_biomass[key]))\n\n\ndef clip_results_to_jan1(nitrate, diatoms, run_start_date):\n    \"\"\"Clip the nitrate concentration and diatom biomass results\n    so that they start on 1-Jan of the bloom year.\n\n    :arg nitrate: Nitrate concentration timeseries\n    :type nitrate: dict of :py:class:`bloomcast.utils.SOG_Timeseries`\n                   instances keyed by ensemble member identifier\n\n    :arg diatoms: Diatom biomass timeseries\n    :type diatoms: dict of :py:class:`bloomcast.utils.SOG_Timeseries`\n                   instances keyed by ensemble member identifier\n\n    :arg run_start_date: SOG run start date\n    :type run_start_date: :py:class:`datetime.date`\n    \"\"\"\n    jan1 = datetime.datetime(run_start_date.year + 1, 1, 1)\n    discard_hours = jan1 - run_start_date\n    discard_hours = discard_hours.days * 24 + 
discard_hours.seconds / 3600\n    for member in nitrate:\n        predicate = nitrate[member].indep_data >= discard_hours\n        nitrate[member].boolean_slice(predicate)\n        diatoms[member].boolean_slice(predicate)\n\n\ndef reduce_results_to_daily(nitrate, diatoms, run_start_date, SOG_timestep):\n    \"\"\"Reduce the nitrate concentration and diatom biomass results\n    to daily values.\n\n    Nitrate concentrations are daily minimum values.\n\n    Diatom biomasses are daily maximum values.\n\n    Independent data values are dates.\n\n    :arg nitrate: Nitrate concentration timeseries\n    :type nitrate: dict of :py:class:`bloomcast.utils.SOG_Timeseries`\n                   instances keyed by ensemble member identifier\n\n    :arg diatoms: Diatom biomass timeseries\n    :type diatoms: dict of :py:class:`bloomcast.utils.SOG_Timeseries`\n                   instances keyed by ensemble member identifier\n\n    :arg run_start_date: SOG run start date\n    :type run_start_date: :py:class:`datetime.date`\n\n    :arg SOG_timestep: SOG run time-step\n    :type SOG_timestep: int\n    \"\"\"\n    # Assume that there are an integral number of SOG time steps in a\n    # day\n    day_slice = 86400 // SOG_timestep\n    jan1 = datetime.date(run_start_date.year + 1, 1, 1)\n    for member in nitrate:\n        last_day = nitrate[member].dep_data.shape[0] - day_slice\n        day_iterator = range(0, last_day, day_slice)\n        nitrate[member].dep_data = np.array(\n            [nitrate[member].dep_data[i:i + day_slice].min()\n             for i in day_iterator])\n        nitrate[member].indep_data = np.array(\n            [jan1 + datetime.timedelta(days=i)\n             for i in range(nitrate[member].dep_data.size)])\n\n        last_day = diatoms[member].dep_data.shape[0] - day_slice\n        day_iterator = range(0, last_day, day_slice)\n        diatoms[member].dep_data = np.array(\n            [diatoms[member].dep_data[i:i + day_slice].max()\n             for i in day_iterator])\n        diatoms[member].indep_data = np.array(\n            [jan1 + datetime.timedelta(days=i)\n             for i in range(diatoms[member].dep_data.size)])\n\n\ndef find_low_nitrate_days(nitrate, threshold):\n    \"\"\"Return the start and end dates of the first 2-day period in\n    which the nitrate concentration is below the ``threshold``.\n    \"\"\"\n    first_low_nitrate_days = {}\n    for member in nitrate:\n        nitrate[member].boolean_slice(nitrate[member].dep_data <= threshold)\n        for i in range(nitrate[member].dep_data.shape[0]):\n            low_nitrate_day_1 = nitrate[member].indep_data[i]\n            days = nitrate[member].indep_data[i + 1] - low_nitrate_day_1\n            if days == datetime.timedelta(days=1):\n                low_nitrate_day_2 = nitrate[member].indep_data[i + 1]\n                break\n        first_low_nitrate_days[member] = (low_nitrate_day_1, low_nitrate_day_2)\n    return first_low_nitrate_days\n\n\ndef find_phytoplankton_peak(diatoms, first_low_nitrate_days, peak_half_width):\n    \"\"\"Return the date within ``peak_half_width`` of the\n    ``first_low_nitrate_days`` on which the diatom biomass is the\n    greatest.\n    \"\"\"\n    half_width_days = datetime.timedelta(days=peak_half_width)\n    bloom_dates, bloom_biomasses = {}, {}\n    for member in diatoms:\n        bloom_window_start = (\n            first_low_nitrate_days[member][0] - half_width_days)\n        bloom_window_end = (\n            first_low_nitrate_days[member][1] + half_width_days)\n        diatoms[member].boolean_slice(\n            diatoms[member].indep_data >= bloom_window_start)\n        diatoms[member].boolean_slice(\n            diatoms[member].indep_data <= bloom_window_end)\n        bloom_date_index = diatoms[member].dep_data.argmax()\n        bloom_dates[member] = diatoms[member].indep_data[bloom_date_index]\n        bloom_biomasses[member] = diatoms[member].dep_data[bloom_date_index]\n    return bloom_dates, bloom_biomasses\n\n\ndef main():\n    try:\n        config_file = 
sys.argv[1]\n except IndexError:\n print('Expected config file path/name')\n sys.exit(1)\n try:\n data_date = arrow.get(sys.argv[2])\n except ValueError:\n print('Expected %Y-%m-%d for data date, got: {0[2]}'.format(sys.argv))\n sys.exit(1)\n except IndexError:\n data_date = None\n bloomcast = Bloomcast(config_file, data_date)\n bloomcast.run()\n", "sub_path": "bloomcast/bloomcast.py", "file_name": "bloomcast.py", "file_ext": "py", "file_size_in_byte": 32169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 99, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.Config", "line_number": 120, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 152, "usage_type": "call"}, {"api_name": "arrow.now", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 181, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 189, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 191, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 192, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 194, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 198, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 198, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 204, "usage_type": "attribute"}, {"api_name": "logging.handlers.SMTPHandler", "line_number": 209, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 209, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 216, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 217, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 220, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 222, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 223, "usage_type": "attribute"}, {"api_name": "wind.WindProcessor", "line_number": 233, "usage_type": "call"}, {"api_name": "wind.make_forcing_data_file", "line_number": 234, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 239, "usage_type": "call"}, {"api_name": "meteo.MeteoProcessor", "line_number": 249, "usage_type": "call"}, {"api_name": "meteo.make_forcing_data_files", "line_number": 250, "usage_type": "call"}, {"api_name": "rivers.RiversProcessor", "line_number": 251, "usage_type": "call"}, {"api_name": "rivers.make_forcing_data_files", "line_number": 252, "usage_type": "call"}, {"api_name": "SOGcommand.api.run", "line_number": 263, "usage_type": "call"}, {"api_name": "SOGcommand.api", "line_number": 263, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 270, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 270, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 272, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 273, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 279, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 279, "usage_type": "attribute"}, {"api_name": "utils.SOG_Timeseries", "line_number": 291, "usage_type": "call"}, {"api_name": "utils.SOG_Timeseries", "line_number": 295, 
"usage_type": "call"}, {"api_name": "utils.SOG_Timeseries", "line_number": 299, "usage_type": "call"}, {"api_name": "utils.SOG_Timeseries", "line_number": 302, "usage_type": "call"}, {"api_name": "utils.SOG_Timeseries", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.dates.date2num", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.dates.date2num", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.dates.MonthLocator", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 358, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.dates.date2num", "line_number": 381, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 381, "usage_type": "call"}, {"api_name": "matplotlib.dates.date2num", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 390, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 390, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.dates.date2num", "line_number": 393, "usage_type": "call"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.dates.HourLocator", "line_number": 396, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 399, "usage_type": "call"}, {"api_name": "utils.SOG_HoffmuellerProfile", "line_number": 417, "usage_type": "call"}, {"api_name": "utils.SOG_HoffmuellerProfile", "line_number": 421, "usage_type": "call"}, {"api_name": "utils.SOG_HoffmuellerProfile", "line_number": 425, "usage_type": "call"}, {"api_name": "utils.SOG_HoffmuellerProfile", "line_number": 429, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 437, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 437, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 438, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 463, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 519, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 541, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 545, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 553, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 554, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 571, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 583, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 622, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 658, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 662, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 665, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 666, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 671, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 674, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 675, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 689, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 701, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 720, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 723, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 725, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 725, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 727, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 728, "usage_type": "call"}]} +{"seq_id": "268357609", "text": "from collections import defaultdict\n\ndef read_lines(filename):\n with open(filename) as fi:\n lines = [w for w in fi.readlines()]\n return lines\n\nenglish = read_lines('data/hansards.e')\nfrench = read_lines('data/hansards.f')\nalign = read_lines('data/hansards.a')\n\nbitext = [[sentence.strip().split() for sentence in pair] for pair in zip(french, english)]\n\nnum = 100\n\ntrain = bitext[:num]\n\n\ndef em(train):\n f_count = defaultdict(int)\n e_count = defaultdict(int)\n fe_count = defaultdict(float)\n for (fs, es) in train:\n for fw in set(fs):\n f_count[fw] += 1\n for ew in set(es):\n e_count[ew] += 1\n for fw in f_count.keys():\n for ew in e_count.keys():\n fe_count[(fw, ew)] += 1.\n for i in range(100):\n # Expectation\n count_fe = defaultdict(float)\n marg_f = defaultdict(float)\n for (fs, es) in train:\n norm_e = defaultdict(float)\n for ew in es:\n norm_e[ew] = 0\n for fw in fs:\n norm_e[ew] += fe_count.get((fw, ew), 0)\n for ew in es:\n for fw in fs:\n c = (fe_count[(fw, ew)] / norm_e[ew]) \n count_fe[(fw, ew)] += c\n marg_f[fw] += c\n # Maximization\n for fw in f_count.keys():\n for ew in e_count.keys():\n fe_count[(fw, ew)] = count_fe[(fw, ew)] / marg_f[fw]\n return fe_count\n\ndef align(text, fe_count):\n for (fs, es) in text:\n alignment = []\n for i, fw in enumerate(fs):\n best_p, best_j = 0, 0\n for j, ew in enumerate(es):\n p = fe_count[(fw, ew)]\n if p > best_p:\n best_p, best_j = p, j\n alignment += [(i, best_j)]\n print(' '.join([str(y)+'-'+str(x) for x,y in alignment]))\n\nfe_count = em(train)\nalign(train, fe_count)\n", "sub_path": "ibm_model1.py", "file_name": "ibm_model1.py", "file_ext": "py", "file_size_in_byte": 1911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "229525272", "text": "from sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nimport numpy as np\nimport subprocess\nimport pickle\nimport math\nimport os\n\n\"\"\"\nDefine the haversine distance function\n\"\"\"\ndef haversine_distance(point1, 
point2):\n \"\"\"\n Computes the haversine distance (in meters) between \n two points in the global coordinate systems.\n Source: http://www.movable-type.co.uk/scripts/latlong.html\n point1 - tuple (latitude, longitude) in degrees\n point2 - tuple (latitude, longitude) in degrees\n \"\"\"\n lat1 = math.radians(point1[0])\n lat2 = math.radians(point2[0])\n delta_lat = point1[0] - point2[0]\n delta_long = point1[1] - point2[1]\n\n R = 6371000.0 # In meters\n a = math.sin(math.radians(delta_lat)/2)**2 + math.cos(lat1)*math.cos(lat2)*(math.sin(math.radians(delta_long)/2)**2)\n c = 2*math.atan2(math.sqrt(a), math.sqrt(1-a))\n d = R*c\n\n return d\n\n# Source for these two functions\n# https://www.learnopencv.com/rotation-matrix-to-euler-angles/\ndef isRotationMatrix(R) :\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype = R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6\n\ndef rotationMatrixToEulerAngles(R) :\n assert(isRotationMatrix(R))\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n \n return np.array([z, y, x])\n\ndef relative_rotation(X1, X2):\n \"\"\"\n Find relative (yaw, pitch, roll) rotation from X1 to X2 in degrees.\n Inputs:\n X1 - iterable(yaw, pitch, roll)\n X2 - iterable(yaw, pitch, roll)\n\n Primary references:\n https://en.wikipedia.org/wiki/Rotation_matrix\n https://www.learnopencv.com/rotation-matrix-to-euler-angles/\n \"\"\"\n yaw1 = math.radians(X1[0])\n pitch1 = math.radians(X1[1])\n roll1 = math.radians(X1[2])\n yaw2 = math.radians(X2[0])\n pitch2 = math.radians(X2[1])\n roll2 = math.radians(X2[2])\n\n # Use negative of the true angles here\n # NOTE: These rotations are for post multiplication\n rot_z_1 = np.array([[math.cos(yaw1), -math.sin(yaw1), 0], \\\n [math.sin(yaw1), math.cos(yaw1), 0], \\\n [0, 0, 1]])\n rot_y_1 = np.array([[math.cos(pitch1), 0, math.sin(pitch1)], \\\n [0, 1, 0],\n [-math.sin(pitch1), 0, math.cos(pitch1)]])\n \n # Ignore roll rotation for the purposes of this dataset (roll is always 0)\n rot_z_2 = np.array([[math.cos(yaw2), -math.sin(yaw2), 0], \\\n [math.sin(yaw2), math.cos(yaw2), 0], \\\n [0, 0, 1]])\n rot_y_2 = np.array([[math.cos(pitch2), 0, math.sin(pitch2)], \\\n [0, 1, 0],\n [-math.sin(pitch2), 0, math.cos(pitch2)]])\n\n # The transformation is generally rotation about z first, then y. 
To go from \n # X1 to X2, we have to undo y first, undo z, then perform z and y for X2.\n rot_undo_1 = np.matmul(rot_y_1.transpose(), rot_z_1.transpose())\n rot_do_2 = np.matmul(rot_z_2, rot_y_2)\n rot_relative = np.matmul(rot_undo_1, rot_do_2)\n \n # Convert rotation matrix to degrees\n angles = rotationMatrixToEulerAngles(rot_relative)\n\n return [math.degrees(angle_i) for angle_i in angles]\n\ndef relative_translation(X1, X2):\n \"\"\"\n Find relative (x, y, z) translation from X1 to X2 in meters.\n Inputs:\n X1 - tuple(latitude (deg), longitude (deg), height (m))\n X2 - tuple(latitude (deg), longitude (deg), height (m))\n \"\"\"\n R = 6371000.0 # In meters\n x_ = math.cos(math.radians(X2[0]))*math.sin(math.radians(X2[1]-X1[1]))\n y_ = math.cos(math.radians(X1[0]))*math.sin(math.radians(X2[0])) - \\\n math.sin(math.radians(X1[0]))*math.cos(math.radians(X2[0]))*math.cos(math.radians(X2[1]-X1[1]))\n bearing = math.atan2(x_, y_)\n hyp = haversine_distance(X1, X2)\n return (hyp*math.cos(bearing), hyp*math.sin(bearing), X2[2]-X1[2])\n\ndef angle_2points(point1, point2):\n \"\"\"\n point1 - numpy ndarray\n point2 - numpy ndarray\n Ref: https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249\n \n Returns angle between point1 and point2 in degrees\n \"\"\"\n \n c = np.dot(point1, point2)\n if c == 0:\n return 90.00\n else:\n c /= (np.linalg.norm(point1) * np.linalg.norm(point2))\n return np.degrees(np.arccos(np.clip(c, -1, 1)))\n\ndef baseline_angle_1(point1, point2, center):\n \"\"\"\n point1 - (latitude/phi, longitude/theta, height) triplet\n point2 - (latitude/phi, longitude/theta, height) triplet\n height - (latitude/phi, longitude/theta, height) triplet\n Formula obtained by converting from global coordinates to x,y,z coordinate\n with x, y axes on the equator plane and z from south pole to north pole\n \n Returns baseline angle between viewpoints 1 and 2\n \"\"\"\n phi1 = math.radians(point1[0])\n theta1 = math.radians(point1[1])\n h1 = point1[2]\n \n phi2 = math.radians(point2[0])\n theta2 = math.radians(point2[1])\n h2 = point2[2]\n \n phic = math.radians(center[0])\n thetac = math.radians(center[1])\n hc = center[2]\n \n R = 6371000.0 # In meters\n \n \"\"\"\n Convert to x, y, z coordinates\n \"\"\"\n # Optimization\n cos_phi1 = math.cos(phi1)\n cos_phi2 = math.cos(phi2)\n cos_phic = math.cos(phic)\n R1 = R + h1\n R2 = R + h2\n Rc = R + hc\n\n x1 = R1 * cos_phi1 * math.sin(theta1)\n y1 = -R1 * cos_phi1 * math.cos(theta1)\n z1 = R1 * math.sin(phi1)\n X1 = np.array([x1, y1, z1])\n \n x2 = R2 * cos_phi2 * math.sin(theta2)\n y2 = -R2 * cos_phi2 * math.cos(theta2)\n z2 = R2 * math.sin(phi2)\n X2 = np.array([x2, y2, z2])\n \n xc = Rc * cos_phic * math.sin(thetac)\n yc = -Rc * cos_phic * math.cos(thetac)\n zc = Rc * math.sin(phic)\n Xc = np.array([xc, yc, zc])\n \n return angle_2points(X1-Xc, X2-Xc)\n\ndef baseline_angle_2(point1, point2, center): \n \n \"\"\"\n point1 - (latitude/phi, longitude/theta, height) triplet\n point2 - (latitude/phi, longitude/theta, height) triplet\n height - (latitude/phi, longitude/theta, height) triplet\n Formula obtained by computing pairwise haversine distance\n and using cosine formula for triangles\n \n Returns baseline angle between viewpoints 1 and 2\n \"\"\"\n \n d1 = haversine_distance(point1[:2], center[:2])\n d2 = haversine_distance(point2[:2], center[:2])\n d3 = haversine_distance(point1[:2], point2[:2])\n \n cos_beta = (d1*d1 + d2*d2 - d3*d3)/(2*d1*d2)\n \n return 
math.degrees(math.acos(cos_beta))\n\ndef create_target_cache(dataset_dir, base_dir):\n \"\"\"\n Given the dataset root directory and the subset name, this function creates\n a target cache which is a dictionary with keys as target IDs and values as \n a dictionary consisting of the details about the target and its views. The\n cache is saved in the dataset root directory. If the cache is already found,\n it is just loaded.\n\n Inputs:\n dataset_dir : root dataset directory path\n base_dir : name of the train data subset like '0002/', '0003/', etc. \n\n Outputs:\n targets : a dictionary with keys as targetIDs. Values are dictionaries\n containing the following (key, value) pairs:\n targetCoord: <targetCoord tuple (latitude (degrees), \n longitude (degrees), height (meters))>\n views : list of different views of the target. Each \n element of the list contains a dictonary\n with \n * 'cameraCoord' as camera coordinates in\n (lat, long, ht) \n * 'distance' as distance to\n the target point,\n * 'imagePath' as path to the view image \n relative to root directory\n * 'alignData' as a list of the alignment\n values as documented in the dataset\n\n \"\"\"\n files = subprocess.check_output(['ls', dataset_dir + base_dir]).split()\n txtfiles = []\n imgfiles = []\n for f in files:\n if f[-3:] == 'txt':\n txtfiles.append(f)\n else:\n imgfiles.append(f)\n\n print(\"Number of images read: %d\"%(len(txtfiles)))\n\n \"\"\"\n Create the dictonary of target points\n \"\"\"\n targets = {}\n\n if os.path.isfile(dataset_dir + 'targets_%s.pkl'%(base_dir[:-1])):\n print('Loading saved file')\n targets = pickle.load(open(dataset_dir + 'targets_%s.pkl'%(base_dir[:-1])))\n print('Loaded saved file!')\n else:\n count = 0\n\n for f in txtfiles:\n strSplit = f.replace('.txt', '').split('_')\n targetID = int(strSplit[3])\n\n txtPath = base_dir + f\n with open(dataset_dir + txtPath) as infile:\n data = infile.read().split('\\n')[:-1]\n\n if len(data) == 2:\n data = data[0].split()\n targetCoord = (float(data[5]), float(data[6]), float(data[7]))\n targets[targetID] = {'targetCoord': targetCoord, 'views': []}\n\n count += 1\n #print('Done with %d/%d'%(count, len(txtfiles)))\n #else:\n #print('Ignoring target %d due to no alignment'%(targetID)) \n\n count = 0\n for f in txtfiles:\n strSplit = f.replace('.txt', '').split('_')\n targetID = int(strSplit[3])\n datasetID = int(strSplit[0])\n imageID = int(strSplit[1])\n viewID = int(strSplit[2])\n\n imgPath = base_dir + f.replace('.txt', '.jpg')\n txtPath = base_dir + f\n with open(dataset_dir + txtPath) as infile:\n data = infile.read().split('\\n')[:-1]\n\n if len(data) == 2:\n align_data = data[1].split()\n data = data[0].split()\n targetCoord = map(float, data[5:8])\n cameraCoord = map(float, data[11:14])\n cameraPose = map(float, data[15:18])\n\n distance = haversine_distance(targetCoord, cameraCoord)\n distance_given = float(data[14])\n\n #if abs(distance - distance_given) > 0.5:\n #print('Error in distance computation > 0.5m !')\n #pdb.set_trace()\n\n targets[targetID]['views'].append({'cameraCoord': cameraCoord, 'distance': distance_given, 'imagePath': imgPath, \\\n 'alignData': align_data, 'cameraPose': cameraPose})\n count += 1\n #print('Done with %d/%d'%(count, len(txtfiles)))\n\n pickle.dump(targets, open(dataset_dir + 'targets_%s.pkl'%(base_dir[:-1]), 'w'))\n\n return targets\n\ndef average_angular_error(predicted_angles, true_angles, average=True):\n \"\"\"\n Angle between predicted pose vector and ground truth vector in the plane defined by their\n cross 
products. \n Inputs:\n predicted_angles : Nx3 numpy array\n true_angles : Nx3 numpy array\n average : bool (if this is true, it returns average. Otherwise, it returns the\n error for each element.\n \"\"\"\n if average:\n avg_error = 0\n for i in range(predicted_angles.shape[0]):\n avg_error +=np.linalg.norm(np.array(relative_rotation(predicted_angles[i, :], true_angles[i, :])))\n \n avg_error /= float(predicted_angles.shape[0])\n \n return float(avg_error)\n else:\n errors = []\n for i in range(predicted_angles.shape[0]):\n errors.append(np.linalg.norm(np.array(relative_rotation(predicted_angles[i, :], true_angles[i, :]))))\n return errors\n\ndef average_translation_error(predicted_translations, true_translations, average=True):\n \"\"\"\n L2 norm of the difference between the normalized translation and ground truth\n vectors. \n Inputs:\n predicted_translations : Nx3 numpy array\n true_translations : Nx3 numpy array\n average : bool (if this is true, it returns average. Otherwise, it returns the\n error for each element.\n\n \"\"\"\n norm_predicted = np.sqrt(np.sum(predicted_translations * predicted_translations, 1))\n normalized_pred = predicted_translations / np.reshape(norm_predicted, (-1, 1))\n norm_predicted = np.sqrt(np.sum(true_translations * true_translations, 1))\n normalized_true = true_translations / np.reshape(norm_predicted, (-1, 1))\n \n if average:\n avg_error = np.sum((normalized_true - normalized_pred) * (normalized_true - normalized_pred), 1)\n avg_error = np.mean(avg_error)\n\n return float(avg_error)\n else:\n errors = np.sum((normalized_true - normalized_pred) * (normalized_true - normalized_pred), 1)\n return errors\n\ndef auc_score(predicted_probabilities, true_classes, get_roc=False):\n \"\"\"\n Computes the area under the ROC curve given the binary probabilities\n of predicting class 1 and the true class labels.\n Inputs:\n predicted_probabilities : N numpy array\n true_classes : N numpy array\n get_roc : bool, if True, return also the ROC curve\n \"\"\"\n if not get_roc:\n return float(roc_auc_score(true_classes, predicted_probabilities))\n else:\n fpr, tpr, thresh = roc_curve(true_classes, predicted_probabilities)\n return float(roc_auc_score(true_classes, predicted_probabilities)), tpr, fpr, thresh\n \n\n", "sub_path": "3d-generic/000_mtl_training/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 13994, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "math.radians", "line_number": 20, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 21, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 26, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 26, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 26, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 27, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 38, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 43, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 46, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 47, 
"usage_type": "call"}, {"api_name": "math.atan2", "line_number": 48, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 50, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 67, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 68, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 69, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 70, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 71, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 77, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 79, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 79, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 81, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 84, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 84, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 85, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 87, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 87, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 89, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 95, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 100, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 110, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 110, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 110, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 111, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 111, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 111, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 112, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 112, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 112, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 113, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 115, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.degrees", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 131, "usage_type": 
"call"}, {"api_name": "numpy.clip", "line_number": 131, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 143, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 144, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 147, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 148, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 151, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 152, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 161, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 162, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 163, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 168, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 169, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 173, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 174, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 178, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 179, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 203, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 203, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 253, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 325, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 333, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 358, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 371, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 373, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 374, "usage_type": "call"}]} +{"seq_id": "493676369", "text": "\"\"\"File that contains the endpoints for the app.\"\"\"\nimport logging\nimport traceback\n\nfrom gevent.wsgi import 
WSGIServer\n\nfrom flask import (Flask, Response, render_template, request,\n                   send_from_directory)\nfrom pylogging import HandlerType, setup_logger\n\nfrom .config import CONFIG\n\nlogger = logging.getLogger(__name__)\napp = Flask(__name__, static_folder='../../front/src')\n\n\n@app.before_first_request\ndef init():\n    \"\"\"Initialize the application with defaults.\"\"\"\n    logger.info(\"App initialized\")\n\n\n@app.route('/')\ndef root():\n    \"\"\"Root route.\"\"\"\n    logger.info(\"route: /\")\n    return app.send_static_file('index.html')\n    # return render_template(\"index.html\")\n\n\n@app.route('/index')\ndef system():\n    \"\"\"System route.\"\"\"\n    logger.info(\"route: /index\")\n    return app.send_static_file('index.html')\n\n\n@app.route('/outdex')\ndef object():\n    \"\"\"Object route.\"\"\"\n    logger.info(\"route: /outdex\")\n    return app.send_static_file('outdex.html')\n\n\n@app.route('/node_modules/<path:path>')\ndef send_node_modules(path):\n    \"\"\"Serve static files from node_modules.\"\"\"\n    logger.info(\"route: node_modules/{}\".format(path))\n    path_prefix = '../../front/node_modules'\n    return send_from_directory(path_prefix, path)\n\n\n@app.route('/<path:path>')\ndef send_static(path):\n    \"\"\"Serve static files.\"\"\"\n    logger.info(\"route: {}\".format(path))\n    path_prefix = '../../front/src'\n    return send_from_directory(path_prefix, path)\n\n\ndef main():\n    \"\"\"Main entry point of the app.\"\"\"\n    try:\n        http_server = WSGIServer((CONFIG['host'], CONFIG['port']),\n                                 app,\n                                 log=logging,\n                                 error_log=logging)\n\n        http_server.serve_forever()\n    except Exception as exc:\n        logger.error(str(exc))\n        logger.exception(traceback.format_exc())\n    finally:\n        # Do something here\n        pass\n", "sub_path": "back/server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 59, "usage_type": "call"}, {"api_name": "gevent.wsgi.WSGIServer", "line_number": 65, "usage_type": "call"}, {"api_name": "config.CONFIG", "line_number": 65, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "157469094", "text": "import time\n\nimport kubernetes\n\nfrom dagster import Field, In, Noneable, Nothing, Permissive, StringSource, op\nfrom dagster._annotations import experimental\nfrom dagster._utils import merge_dicts\n\nfrom ..container_context import K8sContainerContext\nfrom ..job import (\n    DagsterK8sJobConfig,\n    UserDefinedDagsterK8sConfig,\n    construct_dagster_k8s_job,\n    get_k8s_job_name,\n)\nfrom ..launcher import K8sRunLauncher\nfrom ..utils import (\n    wait_for_job,\n    wait_for_job_to_have_pods,\n    wait_for_pod,\n    wait_for_running_job_to_succeed,\n)\n\n\n@op(\n    ins={\"start_after\": In(Nothing)},\n    config_schema=merge_dicts(\n        DagsterK8sJobConfig.config_type_container(),\n        {\n            \"image\": Field(\n                StringSource,\n                is_required=True,\n                description=\"The image in which to launch the k8s job.\",\n            ),\n            \"command\": Field(\n                [str],\n                is_required=False,\n                description=\"The command to run in the container within the launched k8s job.\",\n            ),\n            \"args\": Field(\n                [str],\n                is_required=False,\n                description=\"The args for the command for the 
container.\",\n ),\n \"namespace\": Field(StringSource, is_required=False),\n \"load_incluster_config\": Field(\n bool,\n is_required=False,\n default_value=True,\n description=\"\"\"Set this value if you are running the launcher\n within a k8s cluster. If ``True``, we assume the launcher is running within the target\n cluster and load config using ``kubernetes.config.load_incluster_config``. Otherwise,\n we will use the k8s config specified in ``kubeconfig_file`` (using\n ``kubernetes.config.load_kube_config``) or fall back to the default kubeconfig.\"\"\",\n ),\n \"kubeconfig_file\": Field(\n Noneable(str),\n is_required=False,\n default_value=None,\n description=\"The kubeconfig file from which to load config. Defaults to using the default kubeconfig.\",\n ),\n \"timeout\": Field(\n int,\n is_required=False,\n description=\"How long to wait for the job to succeed before raising an exception\",\n ),\n \"container_config\": Field(\n Permissive(),\n is_required=False,\n description=\"Raw k8s config for the k8s pod's main container (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#container-v1-core). Keys can either snake_case or camelCase.\",\n ),\n \"pod_template_spec_metadata\": Field(\n Permissive(),\n is_required=False,\n description=\"Raw k8s config for the k8s pod's metadata (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta). Keys can either snake_case or camelCase.\",\n ),\n \"pod_spec_config\": Field(\n Permissive(),\n is_required=False,\n description=\"Raw k8s config for the k8s pod's pod spec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core). Keys can either snake_case or camelCase.\",\n ),\n \"job_metadata\": Field(\n Permissive(),\n is_required=False,\n description=\"Raw k8s config for the k8s job's metadata (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta). Keys can either snake_case or camelCase.\",\n ),\n \"job_spec_config\": Field(\n Permissive(),\n is_required=False,\n description=\"Raw k8s config for the k8s job's job spec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#jobspec-v1-batch). Keys can either snake_case or camelCase.\",\n ),\n },\n ),\n)\n@experimental\ndef k8s_job_op(context):\n \"\"\"\n An op that runs a Kubernetes job using the k8s API.\n\n Contrast with the `k8s_job_executor`, which runs each Dagster op in a Dagster job in its\n own k8s job.\n\n This op may be useful when:\n - You need to orchestrate a command that isn't a Dagster op (or isn't written in Python)\n - You want to run the rest of a Dagster job using a specific executor, and only a single\n op in k8s.\n\n For example:\n\n .. literalinclude:: ../../../../../../python_modules/libraries/dagster-k8s/dagster_k8s_tests/unit_tests/test_example_k8s_job_op.py\n :start-after: start_marker\n :end-before: end_marker\n :language: python\n\n The service account that is used to run this job should have the following RBAC permissions:\n\n .. 
literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/kubernetes/k8s_job_op_rbac.yaml\n :language: YAML\n \"\"\"\n\n config = context.op_config\n\n run_container_context = K8sContainerContext.create_for_run(\n context.pipeline_run,\n context.instance.run_launcher\n if isinstance(context.instance.run_launcher, K8sRunLauncher)\n else None,\n )\n\n op_container_context = K8sContainerContext(\n image_pull_policy=config.get(\"image_pull_policy\"), # type: ignore\n image_pull_secrets=config.get(\"image_pull_secrets\"), # type: ignore\n service_account_name=config.get(\"service_account_name\"), # type: ignore\n env_config_maps=config.get(\"env_config_maps\"), # type: ignore\n env_secrets=config.get(\"env_secrets\"), # type: ignore\n env_vars=config.get(\"env_vars\"), # type: ignore\n volume_mounts=config.get(\"volume_mounts\"), # type: ignore\n volumes=config.get(\"volumes\"), # type: ignore\n labels=config.get(\"labels\"), # type: ignore\n namespace=config.get(\"namespace\"), # type: ignore\n resources=config.get(\"resources\"), # type: ignore\n )\n\n container_context = run_container_context.merge(op_container_context)\n\n namespace = container_context.namespace\n\n container_config = config.get(\"container_config\", {})\n command = config.get(\"command\")\n if command:\n container_config[\"command\"] = command\n\n user_defined_k8s_config = UserDefinedDagsterK8sConfig(\n container_config=container_config,\n pod_template_spec_metadata=config.get(\"pod_template_spec_metadata\"),\n pod_spec_config=config.get(\"pod_spec_config\"),\n job_metadata=config.get(\"job_metadata\"),\n job_spec_config=config.get(\"job_spec_config\"),\n )\n\n k8s_job_config = DagsterK8sJobConfig(\n job_image=config[\"image\"],\n dagster_home=None,\n image_pull_policy=container_context.image_pull_policy,\n image_pull_secrets=container_context.image_pull_secrets,\n service_account_name=container_context.service_account_name,\n instance_config_map=None,\n postgres_password_secret=None,\n env_config_maps=container_context.env_config_maps,\n env_secrets=container_context.env_secrets,\n env_vars=container_context.env_vars,\n volume_mounts=container_context.volume_mounts,\n volumes=container_context.volumes,\n labels=container_context.labels,\n resources=container_context.resources,\n )\n\n job_name = get_k8s_job_name(context.run_id, context.op.name)\n\n job = construct_dagster_k8s_job(\n job_config=k8s_job_config,\n args=config.get(\"args\"),\n job_name=job_name,\n pod_name=job_name,\n component=\"k8s_job_op\",\n user_defined_k8s_config=user_defined_k8s_config,\n labels={\n \"dagster/job\": context.pipeline_run.pipeline_name,\n \"dagster/op\": context.op.name,\n \"dagster/run-id\": context.pipeline_run.run_id,\n },\n )\n\n if config[\"load_incluster_config\"]:\n kubernetes.config.load_incluster_config()\n else:\n kubernetes.config.load_kube_config(config.get(\"kubeconfig_file\"))\n\n context.log.info(f\"Creating Kubernetes job {job_name} in namespace {namespace}...\")\n\n start_time = time.time()\n\n kubernetes.client.BatchV1Api().create_namespaced_job(namespace, job)\n\n core_api = kubernetes.client.CoreV1Api()\n\n context.log.info(\"Waiting for Kubernetes job to finish...\")\n\n timeout = config.get(\"timeout\", 0)\n\n wait_for_job(\n job_name=job_name,\n namespace=namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n\n pods = wait_for_job_to_have_pods(\n job_name,\n namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n\n pod_names = [p.metadata.name for p in pods]\n\n if not 
pod_names:\n raise Exception(\"No pod names in job after it started\")\n\n pod_to_watch = pod_names[0]\n watch = kubernetes.watch.Watch()\n\n wait_for_pod(pod_to_watch, namespace, wait_timeout=timeout, start_time=start_time)\n\n log_stream = watch.stream(\n core_api.read_namespaced_pod_log, name=pod_to_watch, namespace=namespace\n )\n\n while True:\n if timeout and time.time() - start_time > timeout:\n watch.stop()\n raise Exception(\"Timed out waiting for pod to finish\")\n\n try:\n log_entry = next(log_stream)\n print(log_entry) # pylint: disable=print-call\n except StopIteration:\n break\n\n wait_for_running_job_to_succeed(\n job_name=job_name,\n namespace=namespace,\n wait_timeout=timeout,\n start_time=start_time,\n )\n", "sub_path": "python_modules/libraries/dagster-k8s/dagster_k8s/ops/k8s_job_op.py", "file_name": "k8s_job_op.py", "file_ext": "py", "file_size_in_byte": 9572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "container_context.K8sContainerContext.create_for_run", "line_number": 123, "usage_type": "call"}, {"api_name": "container_context.K8sContainerContext", "line_number": 123, "usage_type": "name"}, {"api_name": "launcher.K8sRunLauncher", "line_number": 126, "usage_type": "argument"}, {"api_name": "container_context.K8sContainerContext", "line_number": 130, "usage_type": "call"}, {"api_name": "container_context.namespace", "line_number": 146, "usage_type": "attribute"}, {"api_name": "job.UserDefinedDagsterK8sConfig", "line_number": 153, "usage_type": "call"}, {"api_name": "job.DagsterK8sJobConfig", "line_number": 161, "usage_type": "call"}, {"api_name": "container_context.image_pull_policy", "line_number": 164, "usage_type": "attribute"}, {"api_name": "container_context.image_pull_secrets", "line_number": 165, "usage_type": "attribute"}, {"api_name": "container_context.service_account_name", "line_number": 166, "usage_type": "attribute"}, {"api_name": "container_context.env_config_maps", "line_number": 169, "usage_type": "attribute"}, {"api_name": "container_context.env_secrets", "line_number": 170, "usage_type": "attribute"}, {"api_name": "container_context.env_vars", "line_number": 171, "usage_type": "attribute"}, {"api_name": "container_context.volume_mounts", "line_number": 172, "usage_type": "attribute"}, {"api_name": "container_context.volumes", "line_number": 173, "usage_type": "attribute"}, {"api_name": "container_context.labels", "line_number": 174, "usage_type": "attribute"}, {"api_name": "container_context.resources", "line_number": 175, "usage_type": "attribute"}, {"api_name": "job.get_k8s_job_name", "line_number": 178, "usage_type": "call"}, {"api_name": "job.construct_dagster_k8s_job", "line_number": 180, "usage_type": "call"}, {"api_name": "kubernetes.config.load_incluster_config", "line_number": 195, "usage_type": "call"}, {"api_name": "kubernetes.config", "line_number": 195, "usage_type": "attribute"}, {"api_name": "kubernetes.config.load_kube_config", "line_number": 197, "usage_type": "call"}, {"api_name": "kubernetes.config", "line_number": 197, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 201, "usage_type": "call"}, {"api_name": "kubernetes.client.BatchV1Api", "line_number": 203, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 203, "usage_type": "attribute"}, {"api_name": "kubernetes.client.CoreV1Api", "line_number": 205, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 205, "usage_type": "attribute"}, 
{"api_name": "utils.wait_for_job", "line_number": 211, "usage_type": "call"}, {"api_name": "utils.wait_for_job_to_have_pods", "line_number": 218, "usage_type": "call"}, {"api_name": "kubernetes.watch.Watch", "line_number": 231, "usage_type": "call"}, {"api_name": "kubernetes.watch", "line_number": 231, "usage_type": "attribute"}, {"api_name": "utils.wait_for_pod", "line_number": 233, "usage_type": "call"}, {"api_name": "time.time", "line_number": 240, "usage_type": "call"}, {"api_name": "utils.wait_for_running_job_to_succeed", "line_number": 250, "usage_type": "call"}, {"api_name": "dagster.op", "line_number": 25, "usage_type": "call"}, {"api_name": "dagster.In", "line_number": 26, "usage_type": "call"}, {"api_name": "dagster.Nothing", "line_number": 26, "usage_type": "argument"}, {"api_name": "dagster._utils.merge_dicts", "line_number": 27, "usage_type": "call"}, {"api_name": "job.DagsterK8sJobConfig.config_type_container", "line_number": 28, "usage_type": "call"}, {"api_name": "job.DagsterK8sJobConfig", "line_number": 28, "usage_type": "name"}, {"api_name": "dagster.Field", "line_number": 30, "usage_type": "call"}, {"api_name": "dagster.StringSource", "line_number": 31, "usage_type": "argument"}, {"api_name": "dagster.Field", "line_number": 35, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 40, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 45, "usage_type": "call"}, {"api_name": "dagster.StringSource", "line_number": 45, "usage_type": "argument"}, {"api_name": "dagster.Field", "line_number": 46, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 56, "usage_type": "call"}, {"api_name": "dagster.Noneable", "line_number": 57, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 62, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 67, "usage_type": "call"}, {"api_name": "dagster.Permissive", "line_number": 68, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 72, "usage_type": "call"}, {"api_name": "dagster.Permissive", "line_number": 73, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 77, "usage_type": "call"}, {"api_name": "dagster.Permissive", "line_number": 78, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 82, "usage_type": "call"}, {"api_name": "dagster.Permissive", "line_number": 83, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 87, "usage_type": "call"}, {"api_name": "dagster.Permissive", "line_number": 88, "usage_type": "call"}, {"api_name": "dagster._annotations.experimental", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "291783015", "text": "import speech_recognition as sr\nimport pyttsx3\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n\tprint(\"recording ..\")\n\taudio=r.listen(source)\n\torder = r.recognize_google(audio, language=\"fr\")\n\nprint(\"end of recording\")\n\nprint(order)\n\"\"\"\nengine = pyttsx3.init()\nengine.say(order)\nengine.runAndWait()\"\"\"", "sub_path": "speech.py", "file_name": "speech.py", "file_ext": "py", "file_size_in_byte": 306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 4, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "109969907", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import *\nimport json\nfrom 
functools import reduce\n\n# Create your views here.\n\n\n\ndef index(request):\n    user = request.user\n    booktypes = BookTypes.objects.filter(status=True).values(\"id\",\"typename\")\n    books = Books.objects.filter(isshelves=True).order_by(\"-readed\")[:10].values(\"id\",\"bname\")\n    return render(request, 'index.html',locals())\n \n\n\ndef booktype(request):\n    # uname = request.session.get('uname')\n    booktype_id = request.GET.get('booktype_id')\n\n    btype = BookTypes.objects.filter(id=booktype_id)[0]\n    books = btype.book_set.filter(isshelves=True)[:20]\n\n    booknum = btype.book_set.filter(isshelves=True).count()\n    page = [x for x in range(1,booknum//20+1)]\n    return render(request, 'booktype.html', locals())\n\n\ndef ajax_book(request):\n    booktype_id = int(request.GET.get('booktype_id'))\n    page = int(request.GET.get('page'))\n    start = (page-1)*20\n    end = start+20\n    books = BookTypes.objects.filter(id=booktype_id)[0].book_set.filter(isshelves=True)[start:end]\n\n    li = []\n    for book in books:\n        dic = {}\n        dic[\"book_id\"] = book.id\n        dic[\"bname\"] = str(book.bname)\n        dic[\"cover\"] = str(book.cover)\n        dic[\"readed\"] = book.readed\n        dic[\"recommended\"] = book.recommended\n        dic[\"author\"] = str(book.author)\n        dic[\"shelvers_time\"] = str(book.shelvers_time)\n        dic[\"synopsis\"] = book.synopsis\n        dic[\"status\"] = book.status\n        li.append(dic)\n    return HttpResponse(json.dumps(li))\n\n\n\n# bname = models.CharField('书名', max_length=100, db_index=True)\n# synopsis = models.TextField('描述', blank=True)\n# status = models.IntegerField('状态', blank=True, choices=BOOKSTATUS, default=1)\n# cover = models.ImageField('封面', upload_to='images/cover', default='default.png')\n# isshelves = models.BooleanField('上架', default=False)\n# readed = models.IntegerField('点击量', default=0)\n# recommended = models.IntegerField('推荐', default=0)\n# shelvers_time = models.DateField('上架时间', default=None)\n# author = models.ForeignKey(Authors, on_delete=models.CASCADE, verbose_name='作者')\n \n    # comments_set  comments on the book\n    # booktypes_set  types of the book\n    # users_set  users who favorited the book\ndef bookdetails(request):\n    user_id = request.user.id\n    book_id = int(request.GET.get('book_id'))\n    booktype_id = int(request.GET.get('booktype_id','0'))\n    if booktype_id:\n        btype = BookTypes.objects.filter(id=booktype_id)[0] \n    book = Books.objects.filter(id=book_id)[0]\n    booktype = reduce(lambda x,y:x+'|'+y,(x.typename for x in book.booktypes_set.all()))\n    users_conut = book.users_set.count()\n    comments = book.comments_set.filter(attribution=0,isactivation=True).order_by(\"-published_time\")\n    return render(request, 'details.html',locals())\n\n\n# attribution = models.IntegerField('归属',default=0)  # used for comment replies: 0 marks a top-level comment; level two and below store the id of the parent comment\n# context = models.TextField('内容',blank=True)\n# published_time = models.DateTimeField('发表时间')\n# likes = models.IntegerField('好看',default=0)\n# book = models.ForeignKey(Books,on_delete=models.CASCADE)\n# user = models.ForeignKey(Users,on_delete=models.CASCADE)\n\n\ndef er_comments(request):\n    '''Fetch comments of level two and below: all sub-comments of one top-level comment.'''\n    book_id = int(request.GET.get('book_id'))\n    comment_id = int(request.GET.get('comment_id'))\n    book = Books.objects.filter(id=book_id)[0]\n    book_er_comments = book.comments_set.filter(attribution__gt=0,isactivation=True).order_by(\"-published_time\")  # all second-level and deeper comments of this book\n    \n    id_L = []\n    def get_comment_id(comment_id,book_er_comments):\n        comments_L = book_er_comments.filter(attribution=comment_id)\n        if comments_L:\n            for comment in comments_L:\n                id_L.append(comment.id)\n                get_comment_id(comment.id,book_er_comments)\n        else:\n            return\n\n    
get_comment_id(comment_id,book_er_comments)\n L = []\n for comment_id in id_L:\n comment = book_er_comments.filter(id=comment_id)\n if not comment:\n continue\n comment=comment[0]\n print(comment.published_time)\n dic = {\n \"id\":str(comment.id),\n \"attribution_name\":book.comments_set.filter(id=comment.attribution)[0].user.nickname,\n \"username\":comment.user.nickname,\n \"portrait\":str(comment.user.portrait),\n \"context\":comment.context,\n \"likes\":str(comment.likes),\n \"published_time\":str(comment.published_time),\n }\n L.append(dic)\n return HttpResponse(json.dumps(L))\n\n\n\n\n\n\n\n\n", "sub_path": "zhishu/index/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 123, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "255221872", "text": "from tkinter import ttk\nimport tkinter as tk\nimport sqlite3\n\n\ndef connect():\n conn = sqlite3.connect(\"TELBOOK.db\")\n cur = conn.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS profile(id INTEGER PRIMARY KEY,\\\n First TEXT, Surname TEXT,Tel TEXT,info TEXT)\")\n conn.commit()\n conn.close()\n\n\ndef View():\n conn = sqlite3.connect(\"TELBOOK.db\")\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM profile\")\n rows = cur.fetchall()\n for row in rows:\n print(row) # it print all records in the database\n tree.insert(\"\", tk.END, values=row)\n conn.close()\n\n\nconnect() # this to create the db\n\n# WIINDOW CREATE======\nroot = tk.Tk()\nroot.geometry(\"1000x500\")\n\n# HEADERS DATA============================================================================= \ntree= ttk.Treeview(root, column=(\"column1\", \"column2\", \"column3\",\"column4\",\"column5\"), show='headings')\ntree.heading(\"#1\", text=\"NUMBER\")\ntree.heading(\"#2\", text=\"FIRST NAME\")\ntree.heading(\"#3\", text=\"SURNAME\")\ntree.heading(\"#4\", text=\"TELEPHON\")\ntree.heading(\"#5\", text=\"INFORMATION\")\ntree.pack()\n\n# BUTTON CREATE========================== \nb2 = tk.Button(text=\"view data\", command=View)\nb2.pack()\n\nroot.mainloop()\n\n", "sub_path": "database_reg_samples/python_sqlite/phone_book_gui/phone_book_tk.pyw", "file_name": "phone_book_tk.pyw", "file_ext": "pyw", "file_size_in_byte": 1231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 29, "usage_type": "call"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 33, "usage_type": "name"}, {"api_name": "tkinter.Button", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "408230101", "text": 
"import math\nfrom datetime import date, timedelta, datetime\n\nimport pytz\nfrom braces.views import SuperuserRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model, logout\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.db.models import ExpressionWrapper, Sum, DecimalField, F\nfrom django.http import HttpResponseRedirect\nfrom django.utils.timezone import now\nfrom django.views.generic import CreateView, ListView, UpdateView, TemplateView\n\nfrom cinema.settings import TIME_ZONE\nfrom cinema_app.forms import SignUpForm, HallForm, CreateSessionForm, TicketPurchaseForm, EditSessionForm\nfrom cinema_app.models import CinemaUser, Session, Hall, Ticket, Film\nfrom cinema_app.schedule_settings import SCHEDULE_SORTING_METHODS, ALLOWED_DAYS_BEFORE_EDITING, \\\n BREAK_BETWEEN_FILMS_MINUTES\n\nUser = get_user_model()\n\n\nclass Registration(CreateView):\n model = CinemaUser\n form_class = SignUpForm\n template_name = 'registration.html'\n success_url = '/'\n\n\nclass Login(LoginView):\n template_name = 'login.html'\n\n\nclass Logout(LoginRequiredMixin, LogoutView):\n\n def get(self, request, *args, **kwargs):\n logout(request.user)\n return super().get(request, *args, **kwargs)\n\n\nclass SessionScheduleUtilityMixin:\n\n @staticmethod\n def schedule_sorting(query_to_sort, obj_request, sort_key, sorting_methods):\n\n if obj_request.GET[sort_key] == sorting_methods[0]:\n return query_to_sort.order_by('start_datetime', 'session_price')\n\n if obj_request.GET[sort_key] == sorting_methods[1]:\n return query_to_sort.order_by('-start_datetime', 'session_price')\n\n if obj_request.GET[sort_key] == sorting_methods[2]:\n return query_to_sort.order_by('session_price', 'start_datetime')\n\n if obj_request.GET[sort_key] == sorting_methods[3]:\n return query_to_sort.order_by('-session_price', 'start_datetime')\n\n return query_to_sort\n\n @staticmethod\n def check_session_overlap(existing_session_dict, session_to_create_dict, start_time_name, end_time_name):\n\n latest_start = max(existing_session_dict.get(start_time_name), session_to_create_dict.get(start_time_name))\n earliest_end = min(existing_session_dict.get(end_time_name), session_to_create_dict.get(end_time_name))\n\n if earliest_end >= latest_start:\n delta = math.floor((earliest_end - latest_start).seconds / 60)\n\n return delta\n\n delta = 0\n\n return delta\n\n\nclass ProductList(ListView):\n model = Session\n template_name = 'products.html'\n paginate_by = 20\n queryset = Session.objects.all()\n\n\nclass AdminToolsView(SuperuserRequiredMixin, TemplateView):\n template_name = 'admin_tools.html'\n\n\nclass CreateHallView(SuperuserRequiredMixin, CreateView):\n model = Hall\n form_class = HallForm\n template_name = 'create_form.html'\n success_url = '/admin_tools/'\n\n def get_initial(self):\n if self.request.session.get('hall_color') and self.request.session.get('hall_capacity'):\n self.initial['hall_color'] = self.request.session.get('hall_color')\n self.initial['hall_capacity'] = self.request.session.get('hall_capacity')\n\n del self.request.session['hall_color']\n del self.request.session['hall_capacity']\n\n return self.initial.copy()\n\n def form_valid(self, form):\n hall_form = form.save(commit=False)\n\n hall_names = Hall.objects.values_list('hall_color', flat=True)\n\n if hall_form.hall_color in hall_names:\n self.request.session.update(form.cleaned_data)\n\n msg = 'This name is already used for another hall'\n 
class ProductList(ListView):\n model = Session\n template_name = 'products.html'\n paginate_by = 20\n queryset = Session.objects.all()\n\n\nclass AdminToolsView(SuperuserRequiredMixin, TemplateView):\n template_name = 'admin_tools.html'\n\n\nclass CreateHallView(SuperuserRequiredMixin, CreateView):\n model = Hall\n form_class = HallForm\n template_name = 'create_form.html'\n success_url = '/admin_tools/'\n\n def get_initial(self):\n if self.request.session.get('hall_color') and self.request.session.get('hall_capacity'):\n self.initial['hall_color'] = self.request.session.get('hall_color')\n self.initial['hall_capacity'] = self.request.session.get('hall_capacity')\n\n del self.request.session['hall_color']\n del self.request.session['hall_capacity']\n\n return self.initial.copy()\n\n def form_valid(self, form):\n hall_form = form.save(commit=False)\n\n hall_names = Hall.objects.values_list('hall_color', flat=True)\n\n if hall_form.hall_color in hall_names:\n self.request.session.update(form.cleaned_data)\n\n msg = 'This name is already used for another hall'\n messages.warning(self.request, msg)\n\n return HttpResponseRedirect('/admin_tools/create_hall/')\n\n return super().form_valid(form)\n\n\nclass AvailableToEditHallView(SuperuserRequiredMixin, ListView):\n model = Hall\n template_name = 'halls_to_edit.html'\n queryset = Hall.objects.all()\n paginate_by = 10\n\n def get_queryset(self):\n halls_in_use = Ticket.objects.filter(\n ticket_for_session__start_datetime__gt=now() - timedelta(\n days=ALLOWED_DAYS_BEFORE_EDITING)).values_list(\n 'ticket_for_session__hall__id')\n halls_to_render = Hall.objects.exclude(id__in=halls_in_use)\n\n return halls_to_render\n\n\nclass EditHallView(SuperuserRequiredMixin, UpdateView):\n model = Hall\n form_class = HallForm\n template_name = 'create_form.html'\n success_url = '/admin_tools/halls_list/'\n\n def get_initial(self):\n if self.request.session.get('hall_color') and self.request.session.get('hall_capacity'):\n self.initial['hall_color'] = self.request.session.get('hall_color')\n self.initial['hall_capacity'] = self.request.session.get('hall_capacity')\n\n del self.request.session['hall_color']\n del self.request.session['hall_capacity']\n\n return self.initial.copy()\n\n def form_valid(self, form):\n hall_form = form.save(commit=False)\n\n halls_in_use = Ticket.objects.filter(\n ticket_for_session__start_datetime__gt=now() - timedelta(\n days=ALLOWED_DAYS_BEFORE_EDITING)).values_list(\n 'ticket_for_session__hall__id', flat=True)\n\n if hall_form.id in halls_in_use:\n self.request.session.update(self.initial)\n\n msg = 'This hall is in use by recent or upcoming sessions'\n messages.error(self.request, msg)\n\n return HttpResponseRedirect('/admin_tools/halls_list/edit/{}/'.format(hall_form.id))\n\n hall_names = Hall.objects.exclude(id=hall_form.id).values_list('hall_color', flat=True)\n\n if hall_form.hall_color in hall_names:\n self.request.session.update(self.initial)\n\n msg = 'This name is already used for another hall'\n messages.error(self.request, msg)\n\n return HttpResponseRedirect('/admin_tools/halls_list/edit/{}/'.format(hall_form.id))\n\n return super().form_valid(form)\n\n\n
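# --- Editor's note: AvailableToEditHallView/EditHallView above treat a hall
# as locked while it has tickets for sessions newer than the editing window.
# A plain-Python sketch of that cutoff logic over (hall_id, session_start)
# pairs; the data and the 2-day window are hypothetical stand-ins for the
# ORM query against Ticket.
from datetime import datetime, timedelta

def editable_halls(all_hall_ids, ticket_sessions, days_before_editing):
    cutoff = datetime.now() - timedelta(days=days_before_editing)
    in_use = {hall_id for hall_id, start in ticket_sessions if start > cutoff}
    return sorted(all_hall_ids - in_use)

tickets = [(1, datetime.now() + timedelta(days=1)),   # upcoming session: hall 1 locked
           (2, datetime.now() - timedelta(days=30))]  # long past: hall 2 editable
print(editable_halls({1, 2, 3}, tickets, 2))  # -> [2, 3]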
class CreateSessionView(SuperuserRequiredMixin, CreateView, SessionScheduleUtilityMixin):\n model = Session\n form_class = CreateSessionForm\n template_name = 'create_form.html'\n success_url = '/admin_tools/create_session/'\n\n def form_valid(self, form):\n new_session = form.save(commit=False)\n\n film_duration = Film.objects.get(id=new_session.film_id).schedule_minutes\n hall_for_session = form.cleaned_data['hall']\n\n new_session_start_date = form.cleaned_data['session_date_start']\n new_session_end_date = form.cleaned_data['session_date_end']\n new_session_start_time = form.cleaned_data['session_start_time']\n\n days_in_new_session = []\n\n \"\"\"Build a list of every day between the start and end dates from the form\"\"\"\n for day in range((new_session_end_date - new_session_start_date).days + 1):\n day_obj = new_session_start_date + timedelta(days=day)\n days_in_new_session.append(day_obj)\n\n \"\"\"Search for possibly conflicting sessions on each day\"\"\"\n for day in days_in_new_session:\n ids_of_conflicting_sessions = Session.objects.filter(start_datetime__date=day,\n hall=new_session.hall_id).values_list('id', flat=True)\n\n starting_datetime_new_session = datetime.combine(day, new_session_start_time)\n\n \"\"\"Reset the PK so each save() inserts a new row instead of updating the previous one\"\"\"\n new_session.id, new_session.pk = None, None\n\n if not ids_of_conflicting_sessions:\n new_session.start_datetime = starting_datetime_new_session\n new_session.save()\n\n msg = 'Session on {} in hall {} is created'.format(starting_datetime_new_session, hall_for_session)\n messages.success(self.request, msg)\n\n else:\n\n for session_id in ids_of_conflicting_sessions:\n session_instance = Session.objects.get(id=session_id)\n\n existing_session = {}\n session_to_create = {}\n\n ending_time_with_break = starting_datetime_new_session + timedelta(\n minutes=film_duration + BREAK_BETWEEN_FILMS_MINUTES)\n\n \"\"\"Attach the local timezone to the naive datetimes received from the form\"\"\"\n local_time = pytz.timezone(TIME_ZONE)\n\n existing_session['start_datetime'] = session_instance.start_datetime\n existing_session['end_datetime'] = session_instance.film_end_with_break\n session_to_create['start_datetime'] = local_time.localize(starting_datetime_new_session)\n session_to_create['end_datetime'] = local_time.localize(ending_time_with_break)\n\n overlap = self.check_session_overlap(existing_session, session_to_create, 'start_datetime',\n 'end_datetime')\n\n \"\"\"If overlap is greater than 0, the films in this hall overlap\"\"\"\n if overlap:\n\n msg = 'Session on {} overlaps another session in hall {} by {} minute(s)'.format(\n starting_datetime_new_session, hall_for_session, overlap)\n messages.warning(self.request, msg)\n\n else:\n\n new_session.start_datetime = starting_datetime_new_session\n new_session.save()\n\n msg = 'Session on {} in hall {} is created'.format(starting_datetime_new_session,\n hall_for_session)\n messages.success(self.request, msg)\n\n return HttpResponseRedirect(self.success_url)\n\n\n
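# --- Editor's note: a compact sketch of the scheduling loop in
# CreateSessionView.form_valid above: walk every day in the requested range,
# combine it with the start time, and attach a timezone via pytz.localize()
# (form datetimes arrive naive). pytz is assumed available because the module
# imports it; the dates and zone name below are hypothetical.
from datetime import date, datetime, time, timedelta
import pytz

def session_starts(first_day, last_day, start_time, tz_name):
    tz = pytz.timezone(tz_name)
    for offset in range((last_day - first_day).days + 1):
        day = first_day + timedelta(days=offset)
        yield tz.localize(datetime.combine(day, start_time))

for dt in session_starts(date(2021, 6, 1), date(2021, 6, 3), time(18, 30), "Europe/Kiev"):
    print(dt.isoformat())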
class SessionListWithoutTicketsView(SuperuserRequiredMixin, ListView, SessionScheduleUtilityMixin):\n model = Session\n template_name = 'no_tickets_session.html'\n paginate_by = 10\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=object_list, **kwargs)\n context['sort_methods'] = SCHEDULE_SORTING_METHODS\n return context\n\n def get_queryset(self):\n queryset = Session.objects.filter(\n start_datetime__gt=now() + timedelta(days=ALLOWED_DAYS_BEFORE_EDITING)).order_by('start_datetime')\n\n if self.request.GET.get('sort') in SCHEDULE_SORTING_METHODS:\n queryset = self.schedule_sorting(queryset, self.request, 'sort', SCHEDULE_SORTING_METHODS)\n\n sessions_without_tickets = [obj for obj in queryset if not obj.purchased_tickets]\n\n return sessions_without_tickets\n\n\nclass EditSessionView(SuperuserRequiredMixin, UpdateView, SessionScheduleUtilityMixin):\n model = Session\n form_class = EditSessionForm\n template_name = 'create_form.html'\n success_url = '/admin_tools/session_list/'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n session_instance = context.get('session')\n\n context['form'].initial['session_date_start'] = session_instance.start_datetime.date()\n context['form'].initial['session_start_time'] = session_instance.start_datetime.time().strftime('%H:%M')\n\n return context\n\n def form_valid(self, form):\n session_editing = form.save(commit=False)\n\n film_duration = Film.objects.get(id=session_editing.film_id).schedule_minutes\n hall_for_session = form.cleaned_data['hall']\n\n new_session_start_date = form.cleaned_data['session_date_start']\n new_session_start_time = form.cleaned_data['session_start_time']\n starting_datetime_new_session = datetime.combine(new_session_start_date, new_session_start_time)\n\n ids_of_conflicting_sessions = Session.objects.filter(start_datetime__date=new_session_start_date,\n hall=session_editing.hall_id).exclude(\n id=session_editing.id).values_list('id', flat=True)\n\n if not ids_of_conflicting_sessions:\n session_editing.start_datetime = starting_datetime_new_session\n session_editing.save()\n\n msg = 'Session on {} in hall {} is updated'.format(starting_datetime_new_session, hall_for_session)\n messages.success(self.request, msg)\n\n else:\n\n for session_id in ids_of_conflicting_sessions:\n session_instance = Session.objects.get(id=session_id)\n\n existing_session = {}\n session_to_create = {}\n\n ending_time_with_break = starting_datetime_new_session + timedelta(\n minutes=film_duration + BREAK_BETWEEN_FILMS_MINUTES)\n\n \"\"\"Attach the local timezone to the naive datetimes received from the form\"\"\"\n local_time = pytz.timezone(TIME_ZONE)\n\n existing_session['start_datetime'] = session_instance.start_datetime\n existing_session['end_datetime'] = session_instance.film_end_with_break\n session_to_create['start_datetime'] = local_time.localize(starting_datetime_new_session)\n session_to_create['end_datetime'] = local_time.localize(ending_time_with_break)\n\n overlap = self.check_session_overlap(existing_session, session_to_create, 'start_datetime',\n 'end_datetime')\n\n \"\"\"If overlap is greater than 0, the films in this hall overlap\"\"\"\n if overlap:\n\n msg = 'Session on {} overlaps another session in hall {} by {} minute(s)'.format(\n starting_datetime_new_session, hall_for_session, overlap)\n messages.warning(self.request, msg)\n\n else:\n\n session_editing.start_datetime = starting_datetime_new_session\n session_editing.save()\n\n msg = 'Session on {} in hall {} is updated'.format(starting_datetime_new_session,\n hall_for_session)\n messages.success(self.request, msg)\n\n return HttpResponseRedirect(self.success_url)\n\n\nclass ScheduleTodayView(ListView, SessionScheduleUtilityMixin):\n model = Session\n template_name = 'sessions_today.html'\n queryset = Session.objects.filter(start_datetime__contains=date.today())\n paginate_by = 10\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=object_list, **kwargs)\n context['sort_methods'] = SCHEDULE_SORTING_METHODS\n return context\n\n def get_queryset(self):\n if self.request.GET.get('sort') in SCHEDULE_SORTING_METHODS:\n sorting_result = self.schedule_sorting(self.queryset, self.request, 'sort', SCHEDULE_SORTING_METHODS)\n return sorting_result\n\n return self.queryset\n\n\nclass ScheduleTomorrowView(ListView, SessionScheduleUtilityMixin):\n model = Session\n template_name = 'sessions_tomorrow.html'\n queryset = Session.objects.filter(\n start_datetime__contains=date.today() + timedelta(days=ALLOWED_DAYS_BEFORE_EDITING))\n paginate_by = 10\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=object_list, **kwargs)\n context['sort_methods'] = SCHEDULE_SORTING_METHODS\n context.update({'form': TicketPurchaseForm})\n return context\n\n def get_queryset(self):\n if self.request.GET.get('sort') in SCHEDULE_SORTING_METHODS:\n sorting_result = self.schedule_sorting(self.queryset, self.request, 'sort', SCHEDULE_SORTING_METHODS)\n return sorting_result\n\n return self.queryset\n\n\n
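# --- Editor's note: OrderTicketView below rejects orders that exceed the
# hall's remaining capacity. A framework-free sketch of those two checks;
# the returned strings are hypothetical stand-ins for the view's messages
# and redirects.
def validate_order(requested, hall_capacity, purchased):
    remaining = hall_capacity - purchased
    if remaining == 0:
        return 'no seats left'
    if requested > remaining:
        return 'you can order at most {} tickets'.format(remaining)
    return 'ok'

print(validate_order(3, 100, 98))  # -> you can order at most 2 tickets
print(validate_order(2, 100, 98))  # -> ok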
class OrderTicketView(LoginRequiredMixin, CreateView):\n model = Ticket\n form_class = TicketPurchaseForm\n template_name = 'order_ticket.html'\n success_url = '/order_history/'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n session_id = int(self.request.GET['session'])\n session_object = Session.objects.get(id=session_id)\n\n context['film'] = session_object.film.film_name\n context['description'] = session_object.film.film_description\n context['start'] = session_object.start_datetime\n context['price'] = session_object.session_price\n\n return context\n\n def get_initial(self):\n if self.request.session.get('old_value'):\n self.initial['ordered_seats'] = self.request.session.get('old_value')\n del self.request.session['old_value']\n\n return self.initial.copy()\n\n def form_valid(self, form):\n order = form.save(commit=False)\n\n session_id = int(self.request.GET['session'])\n session_object = Session.objects.get(id=session_id)\n hall_object = Hall.objects.get(id=session_object.hall_id)\n allowed_tickets = Hall.objects.get(id=session_object.hall_id).hall_capacity - session_object.purchased_tickets\n\n if session_object.purchased_tickets == hall_object.hall_capacity:\n\n msg = 'No seats left for the chosen session'\n messages.error(self.request, msg)\n\n if session_object.start_datetime.date() == date.today():\n return HttpResponseRedirect('/schedule_today/')\n\n return HttpResponseRedirect('/schedule_tomorrow/')\n\n if order.ordered_seats > allowed_tickets:\n self.request.session['old_value'] = order.ordered_seats\n\n msg = 'You can order at most {} tickets'.format(allowed_tickets)\n messages.error(self.request, msg)\n\n return HttpResponseRedirect('/order_ticket/?session={}'.format(session_id))\n\n order.buyer = self.request.user\n order.ticket_for_session = session_object\n\n order.save()\n\n return super().form_valid(form)\n\n\nclass PurchasedTicketsListView(LoginRequiredMixin, ListView):\n model = Ticket\n template_name = 'purchased_ticket_list.html'\n paginate_by = 10\n\n def get_queryset(self):\n queryset = Ticket.objects.filter(buyer=self.request.user).order_by('-ticket_for_session__start_datetime')\n return queryset\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(object_list=object_list, **kwargs)\n\n total_amount = self.get_queryset().values('ticket_for_session__session_price', 'ordered_seats').aggregate(\n total_sum=Sum(ExpressionWrapper(F('ticket_for_session__session_price') * F('ordered_seats'),\n output_field=DecimalField())))\n\n context['total_amount'] = total_amount['total_sum']\n\n return context\n", "sub_path": "cinema_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 18977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 21, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 24, "usage_type": "name"}, {"api_name": "cinema_app.models.CinemaUser", "line_number": 25, "usage_type": "name"}, {"api_name": "cinema_app.forms.SignUpForm", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 31, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 38, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 68, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 77, "usage_type": "name"}, {"api_name": "cinema_app.models.Session", "line_number": 78,
"usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.all", "line_number": 81, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 81, "usage_type": "name"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 84, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 84, "usage_type": "name"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 88, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 88, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall", "line_number": 89, "usage_type": "name"}, {"api_name": "cinema_app.forms.HallForm", "line_number": 90, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall.objects.values_list", "line_number": 107, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Hall", "line_number": 107, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 113, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 115, "usage_type": "call"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 120, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 120, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall", "line_number": 121, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall.objects.all", "line_number": 123, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Hall", "line_number": 123, "usage_type": "name"}, {"api_name": "cinema_app.models.Ticket.objects.filter", "line_number": 127, "usage_type": "call"}, {"api_name": "cinema_app.models.Ticket.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Ticket", "line_number": 127, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 128, "usage_type": "call"}, {"api_name": "cinema_app.schedule_settings.ALLOWED_DAYS_BEFORE_EDITING", "line_number": 129, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall.objects.exclude", "line_number": 131, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Hall", "line_number": 131, "usage_type": "name"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 136, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 136, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall", "line_number": 137, "usage_type": "name"}, {"api_name": "cinema_app.forms.HallForm", "line_number": 138, "usage_type": "name"}, {"api_name": "cinema_app.models.Ticket.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "cinema_app.models.Ticket.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Ticket", "line_number": 155, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 156, "usage_type": "call"}, 
{"api_name": "cinema_app.schedule_settings.ALLOWED_DAYS_BEFORE_EDITING", "line_number": 157, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 164, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 164, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 166, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects.exclude", "line_number": 168, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Hall", "line_number": 168, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 174, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 174, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 176, "usage_type": "call"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 181, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 181, "usage_type": "name"}, {"api_name": "cinema_app.models.Session", "line_number": 182, "usage_type": "name"}, {"api_name": "cinema_app.forms.CreateSessionForm", "line_number": 183, "usage_type": "name"}, {"api_name": "cinema_app.models.Film.objects.get", "line_number": 190, "usage_type": "call"}, {"api_name": "cinema_app.models.Film.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Film", "line_number": 190, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 201, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects.filter", "line_number": 206, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 206, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 206, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 219, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 219, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.get", "line_number": 224, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 224, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 224, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 229, "usage_type": "call"}, {"api_name": "cinema_app.schedule_settings.BREAK_BETWEEN_FILMS_MINUTES", "line_number": 230, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 233, "usage_type": "call"}, {"api_name": "cinema.settings.TIME_ZONE", "line_number": 233, "usage_type": "argument"}, {"api_name": "django.contrib.messages.warning", "line_number": 248, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 248, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 257, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 257, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 259, "usage_type": "call"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 262, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 262, "usage_type": "name"}, {"api_name": "cinema_app.models.Session", 
"line_number": 263, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 269, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.filter", "line_number": 273, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 273, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 273, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 274, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 274, "usage_type": "call"}, {"api_name": "cinema_app.schedule_settings.ALLOWED_DAYS_BEFORE_EDITING", "line_number": 274, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 276, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 277, "usage_type": "argument"}, {"api_name": "braces.views.SuperuserRequiredMixin", "line_number": 284, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 284, "usage_type": "name"}, {"api_name": "cinema_app.models.Session", "line_number": 285, "usage_type": "name"}, {"api_name": "cinema_app.forms.EditSessionForm", "line_number": 286, "usage_type": "name"}, {"api_name": "cinema_app.models.Film.objects.get", "line_number": 302, "usage_type": "call"}, {"api_name": "cinema_app.models.Film.objects", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Film", "line_number": 302, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 307, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 307, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.filter", "line_number": 309, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 309, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 309, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 318, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 318, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.get", "line_number": 323, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 323, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 323, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 328, "usage_type": "call"}, {"api_name": "cinema_app.schedule_settings.BREAK_BETWEEN_FILMS_MINUTES", "line_number": 329, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 332, "usage_type": "call"}, {"api_name": "cinema.settings.TIME_ZONE", "line_number": 332, "usage_type": "argument"}, {"api_name": "django.contrib.messages.warning", "line_number": 347, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 347, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 356, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 356, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 358, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 361, "usage_type": "name"}, {"api_name": "cinema_app.models.Session", "line_number": 362, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.filter", "line_number": 364, "usage_type": "call"}, 
{"api_name": "cinema_app.models.Session.objects", "line_number": 364, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 364, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 364, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 364, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 369, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 373, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 374, "usage_type": "argument"}, {"api_name": "django.views.generic.ListView", "line_number": 380, "usage_type": "name"}, {"api_name": "cinema_app.models.Session", "line_number": 381, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.filter", "line_number": 383, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 383, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 383, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 384, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 384, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 384, "usage_type": "call"}, {"api_name": "cinema_app.schedule_settings.ALLOWED_DAYS_BEFORE_EDITING", "line_number": 384, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 389, "usage_type": "name"}, {"api_name": "cinema_app.forms.TicketPurchaseForm", "line_number": 390, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 394, "usage_type": "name"}, {"api_name": "cinema_app.schedule_settings.SCHEDULE_SORTING_METHODS", "line_number": 395, "usage_type": "argument"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 401, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 401, "usage_type": "name"}, {"api_name": "cinema_app.models.Ticket", "line_number": 402, "usage_type": "name"}, {"api_name": "cinema_app.forms.TicketPurchaseForm", "line_number": 403, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.get", "line_number": 410, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 410, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 410, "usage_type": "name"}, {"api_name": "cinema_app.models.Session.objects.get", "line_number": 430, "usage_type": "call"}, {"api_name": "cinema_app.models.Session.objects", "line_number": 430, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Session", "line_number": 430, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall.objects.get", "line_number": 431, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects", "line_number": 431, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Hall", "line_number": 431, "usage_type": "name"}, {"api_name": "cinema_app.models.Hall.objects.get", "line_number": 432, "usage_type": "call"}, {"api_name": "cinema_app.models.Hall.objects", "line_number": 432, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Hall", "line_number": 432, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 437, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 437, 
"usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 439, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 439, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 440, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 442, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 448, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 448, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 450, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 460, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 460, "usage_type": "name"}, {"api_name": "cinema_app.models.Ticket", "line_number": 461, "usage_type": "name"}, {"api_name": "cinema_app.models.Ticket.objects.filter", "line_number": 466, "usage_type": "call"}, {"api_name": "cinema_app.models.Ticket.objects", "line_number": 466, "usage_type": "attribute"}, {"api_name": "cinema_app.models.Ticket", "line_number": 466, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 473, "usage_type": "call"}, {"api_name": "django.db.models.ExpressionWrapper", "line_number": 473, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 473, "usage_type": "call"}, {"api_name": "django.db.models.DecimalField", "line_number": 474, "usage_type": "call"}]} +{"seq_id": "612888289", "text": "\n#===============================================================================\n#\n# SSG Build system\n#\n# Copyright (c) 2015-2017 Qualcomm Technologies, Inc. All Rights Reserved.\n# Qualcomm Technologies Proprietary and Confidential.\n#\n#===============================================================================\n'''\n Created on Jan. 
14, 2016\n @authors: c_jacke, lenhardw\n\n Python Script for creating Builds\n\n Syntax: run with --help for syntax\n\n'''\n\nimport logging\n\nVERBOSE = 5\nlogging.basicConfig()\nlogger = logging.getLogger(\"build_all\")\nlogger.setLevel(logging.INFO)\n\nimport glob\nimport optparse\nimport os\nimport platform\nimport re\nimport signal\nimport shutil\nimport socket\nimport sys\nimport time\nimport traceback\n\nfrom subprocess import call\nfrom xml.dom import minidom\nfrom xml.etree import ElementTree\n\nimport BuildConfigParser\n\ntry:\n import cleanpack_tz2 as cleanpack_tz # FIXME rename file to cleanpack_tz\nexcept ImportError:\n cleanpack_tz = None # pylint: disable=C0103\n\n# directory containing this script\nEXEC_DIR = os.path.dirname(os.path.realpath(__file__))\n\n# Flags\nFORBID_UNDOCUMENTED_ARTIFACTS = False\nKEEP_GOING = False\n\n# TODO - move these to XML config as well if possible\n# root of the TZ build (two levels up from this script)\nBUILD_ROOT = os.path.abspath(os.path.join(EXEC_DIR, os.pardir, os.pardir))\nPACKAGE_ROOT = os.path.abspath(os.path.join(BUILD_ROOT, os.pardir))\nTOOLS_SCONS_PATH = os.path.join(\"tools\", \"build\", \"scons\")\nSCONS_BUILD_PATH = \"build\"\nBUILD_PATH = os.path.join(\"build\", \"ms\")\nLOGFILE_ROOT = os.path.join(BUILD_ROOT, BUILD_PATH)\nTEMP_DIR = os.path.join(PACKAGE_ROOT, \"temp\")\nCONFIG_FILES = [\n os.path.join(LOGFILE_ROOT, \"build_config.xml\"),\n os.path.join(LOGFILE_ROOT, \"build_config_deploy.xml\"),\n]\nREPORT_GENERATOR_SCRIPT = os.path.join(BUILD_ROOT, TOOLS_SCONS_PATH, \\\n \"linker_reporter\", \"report_generator.py\")\nREPORT_GENERATOR_PATH = os.path.join(\"linker_reporter\", \"report_generator.py\")\n\nMANIFEST_FILE = os.path.join(BUILD_ROOT, \"build\", \"manifest.xml\")\nBUILD_LOG_PREFIX = \"build-log\"\nUNIFIED_LOG = \"LOGFILE\"\nCOV_FILE_NAME = \"core_tz_bullseye.cov\"\nCOV_LOG_FILE = \"core_tz_bullseye.log\"\n\nDEFAULT_ARGS = [\n \"-f\",\n \"target.scons\",\n]\n\nCLEANPACK_ARGS = [\n \"-c\",\n \"--implicit-deps-unchanged\",\n \"--cleanpack\",\n]\n\nCLEANPACK_COPY_TARGETS = [\n os.path.join(\"trustzone_images\", \"core\", \"securemsm\", \"trustzone\", \\\n \"monitor\"),\n os.path.join(\"trustzone_images\", \"core\", \"securemsm\", \"trustzone\", \"qsee\"),\n]\n\n# TODO - clean these directory references up somehow\nHK11_DIR = os.path.join(PACKAGE_ROOT, \"HK11\")\nHY11_DIR = os.path.join(PACKAGE_ROOT, \"HY11_1\")\n\nDEFAULT_MAPREPORT = False\nDEFAULT_OS = \"default-os\"\n\nCRM_CLIENT_LOG = \"crm_client.log\"\nCRM_INFO_FILE = \"crm_info.txt\"\n\nENVIRONMENT_DUMP_SHELL = \"envdump.sh\"\nENVIRONMENT_DUMP_BATCH = \"envdump.cmd\"\n\nSHORT_DELAY = 1\nMEDIUM_DELAY = 2\nLONG_DELAY = 5\nDEQUEUE_RETRY_DELAY = 2\nWORKER_WAIT_TIME = 60\nFAST_BUILD = False\nSEPARATOR = 40 * \"-\"\nCLEAN_FLAG = \"-c\"\n\n# worker response types\nWORKER_SUCCESS = 0\nWORKER_FAILURE = 1\nWORKER_EXITED = 2\nWORKER_EXCEPTION = 3\n\nclass ArtifactNotFoundException(IOError):\n \"\"\"Custom exception thrown for missing build artifacts\"\"\"\n pass\n\n# error codes\n# TODO: for now, every line using one of these has a pylint suppression comment.\n# However, this mechanism should be redesigned to allow proper checking of\n# variable definitions as bugs could sneak past the checker.\nERROR_CODES = {\n \"ERROR_OK\" : 0,\n \"ERROR_INVALID_ARG\" : -1,\n \"ERROR_BAD_CONFIG\" : -2,\n \"ERROR_INVALID_ARTIFACT\" : -3,\n \"ERROR_SYS_ERR\" : -4,\n \"ERROR_BAD_PATH\" : -5,\n \"ERROR_WORKER_EXCEPTION\" : -6,\n \"ERROR_INTERRUPTED\" : -7,\n \"ERROR_UNKNOWN\" : -8,\n}\n\ndef 
_setErrors(mapping):\n \"\"\"Translate ERROR_CODES into usable variables\"\"\"\n _globals = globals()\n for key in mapping:\n val = mapping[key]\n _globals[key] = val\n for key2 in mapping:\n if key == key2:\n continue\n assert mapping[key2] != val, \\\n \"%s & %s cannot have the same value\" % (key, key2)\n\n_setErrors(ERROR_CODES)\n\ndef strerr(err):\n \"\"\"Return human-readable strings for error codes\"\"\"\n for key in ERROR_CODES:\n if ERROR_CODES[key] == err:\n return key\n logger.debug(\"%d is not a recognized error code\" % err)\n return str(err)\n\ndef buildAll():\n \"\"\"main build function\"\"\"\n # TODO - @redesign to reduce returns, branches, local vars & statements\n #pylint: disable=R0911, R0912, R0914, R0915\n startTime = time.time()\n (options, args) = parseArguments()\n\n if options.verbose:\n if logger.getEffectiveLevel() > logging.DEBUG:\n logger.setLevel(logging.DEBUG)\n DEFAULT_ARGS.append(\"--verbose=2\")\n\n global FORBID_UNDOCUMENTED_ARTIFACTS #pylint: disable=W0603\n global KEEP_GOING # pylint: disable=W0603\n\n if options.keep_going:\n KEEP_GOING = True\n\n if options.fast_build:\n global FAST_BUILD #pylint: disable=W0603\n FAST_BUILD = True\n\n cleanBuild = options.clean or options.clean_build\n makeBuild = not options.clean\n\n # change to build/ms directory for config stage\n os.chdir(LOGFILE_ROOT)\n\n # load the XML config\n config = BuildConfigParser.loadXml(options.config_file)\n if len(config) == 0:\n logger.critical(\"failed to load %s\" % options.config_file)\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n\n if options.detect_branch:\n branch = detectBranch(config)\n if branch != None:\n print(branch)\n return ERROR_OK #pylint: disable=E0602\n else:\n logger.error(\"Could not auto-detect branch\")\n return ERROR_INVALID_ARG #pylint: disable=E0602\n\n # get a mapping of branch aliases to actual branch names\n aliasMap = BuildConfigParser.loadAliasMap(config)\n\n autoSelectedBranch = False\n if options.branch == None:\n branch = detectAndLogBranch(config)\n if branch == None:\n # we are guaranteed to have at least one non-virtual branch in xml\n productionBranches = []\n for branchName in config:\n if config[branchName][\"virtual\"]:\n continue\n # return an actual branch, not an alias\n if branchName not in aliasMap:\n productionBranches.append(branchName)\n productionBranches.sort()\n availableBranches = \"Available branches are:\\n%s\" \\\n % \"\\n\".join(productionBranches)\n logger.critical( \\\n #pylint: disable=C0301\n \"Could not detect branch. 
Please pass it explicitly:\\nEg: -b %s\\n\\n%s\" \\\n % (productionBranches[0], availableBranches))\n return ERROR_INVALID_ARG #pylint: disable=E0602\n autoSelectedBranch = True\n else:\n branch = options.branch\n logger.debug(\"Branch '%s' explicitly passed via command line\" % branch)\n\n branchConfig = config.get(branch)\n if branchConfig == None:\n logger.critical(\"Branch '%s' not found in %s\" \\\n % (branch, options.config_file))\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n if branchConfig[\"virtual\"] == True:\n logger.critical( \\\n \"Branch '%s' is virtual (non-production) and cannot be built\" \\\n % branch)\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n\n # We have a legitimate branch; now translate any alias to its real name\n rawBranch = branch\n branch = aliasMap.get(branch, rawBranch)\n if rawBranch != branch:\n logger.debug(\"Translate branch alias '%s' --> '%s'\" \\\n % (rawBranch, branch))\n\n # Now that we have our config & branch, parse any leftover arguments to\n # establish additional params (eg X=Y), target files (eg tz), or pass-though\n # args (anything leftover after the others)\n buildInput = parseExtraArguments(args, branchConfig)\n cmdParams = buildInput[\"params\"]\n\n # set software image for use by SConscripts\n cmdParams[\"SOFTWARE_IMAGE\"] = branch\n\n if options.scons_args:\n buildInput[\"args\"].append(options.scons_args)\n\n # IMPORTANT NOTE:\n # While 'chipset' & 'target' may appear overloaded, there is a distinct\n # difference: 'target' represents a buildable target name, as represented in\n # build_config.xml by <chipset> tags. A target's 'chipset' is USUALLY but NOT\n # ALWAYS its name. Differences may occur for special targets like\n # pre-silicon build targets (eg rumi/virtio) or targets with binary\n # compatibility (eg msm8976 uses the 'msm8956' chipset).\n # NOTE: <chipset> tags used to be (legacy) <target> tags, but were changed\n # to reduce confusion\n #\n # This difference is abstracted away from the user via the CHIPSET param\n # which actually references a 'target', letting the build script perform any\n # necessary translation. 
Errors to the user will always reflect 'chipset'.\n targets = []\n target = cmdParams.get(\"CHIPSET\")\n autoSelectedTarget = False\n if target == None:\n if options.use_default_target:\n defaultTarget = branchConfig[\"default_target\"]\n if defaultTarget == None:\n logger.critical( \\\n #pylint: disable=C0301\n \"No default target found for branch '%s' - must provide a valid chipset via CHIPSET=...\" \\\n % branch)\n return ERROR_INVALID_ARG #pylint: disable=E0602\n targets = [ defaultTarget ]\n else:\n # build all valid targets for this branch\n for target in branchConfig[\"targets\"]:\n targetInfo = branchConfig[\"targets\"][target]\n if targetInfo[\"buildable\"] and not targetInfo[\"exclusive\"]:\n targets.append(target)\n #pylint: disable=C0301\n assert len(targets) > 0, \\\n \"%s is invalid - no buildable targets listed for branch %s\" \\\n % (options.config_file, branch)\n autoSelectedTarget = True\n else:\n targets = target.split(\",\")\n logger.debug( \\\n \"Chipset(s) explicitly passed via command line: {}\".format( \\\n \", \".join(targets)))\n if options.use_default_target:\n logger.critical( \\\n \"Cannot both specify a chipset & request the default\")\n return ERROR_INVALID_ARG #pylint: disable=E0602\n # Now, remove the 'CHIPSET' param so that the given target gets properly\n # translated to a chipset later via our build config\n del cmdParams[\"CHIPSET\"]\n targets.sort() # sort the targets for more consistent build behavior\n del target # we no longer need this & redefine it immediately below\n\n # validate selected targets\n for target in targets:\n targetConfig = branchConfig[\"targets\"].get(target)\n if targetConfig == None:\n logger.critical(\"Chipset '%s' not defined in %s\" \\\n % (target, options.config_file))\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n if targetConfig[\"buildable\"] == False:\n logger.critical(\"Chipset '%s' is not a buildable target\" % target)\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n if targetConfig[\"exclusive\"] == True and len(targets) > 1:\n logger.critical( \\\n \"Chipset '%s' cannot be built with other targets\" % target)\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n if targetConfig[\"chipset\"] != target:\n logger.debug(\"Target '%s' maps to chipset '%s'\" \\\n % (target, targetConfig[\"chipset\"]))\n\n # if files were specified via the command line, verify that they apply\n # to at least one of the selected targets\n for f in buildInput[\"files\"]:\n valid = False\n for target in targets:\n targetConfig = branchConfig[\"targets\"].get(target)\n fileMeta = targetConfig[\"files\"].get(f)\n if fileMeta != None:\n valid = True\n if fileMeta[\"disable\"]:\n logger.warn( \\\n #pylint: disable=C0301\n \"'{}' (disabled in {} {} config) is explicitly enabled by the user\".format( \\\n f, target, branch))\n else:\n logger.debug( \\\n \"'{}' is not supported for {} on branch {}\".format( \\\n f, target, branch))\n if not valid:\n logger.critical( \\\n # pylint: disable=C0301\n \"'{}' is not supported for any chipset ({}) on branch {}\".format( \\\n f, \"/\".join(targets), branch))\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n\n addCustomBuildTargets(options, branchConfig, buildInput)\n\n if options.list_targets:\n print(\", \".join(targets))\n return ERROR_OK #pylint: disable=E0602\n\n osType = getOsType()\n setupEnv(targetConfig, osType, options.dump_environment)\n\n setStaticAnalysisEnvironment(options.enable_static_analysis, buildInput)\n\n if options.code_coverage:\n setCodeCoverageEnvironment()\n\n if 
checkEnv(osType) != 0:\n logger.critical( \\\n #pylint: disable=C0301\n \"Environment settings are incorrect - please see previous error message. Exiting ...\")\n return ERROR_BAD_CONFIG #pylint: disable=E0602\n\n targetCount = len(targets)\n if targetCount > 1:\n if cmdParams.get(\"BUILD_ID\") != None:\n logger.critical( \\\n #pylint: disable=C0301\n \"Cannot use custom build id (%s) when building multiple chipsets (%s)\" \\\n % (cmdParams.get(\"BUILD_ID\"), \", \".join(targets)))\n return ERROR_INVALID_ARG #pylint: disable=E0602\n if cmdParams.get(\"TARGET_FAMILY\") != None:\n logger.critical( \\\n #pylint: disable=C0301\n \"Cannot use custom target family (%s) when building multiple chipsets (%s)\" \\\n % (cmdParams.get(\"TARGET_FAMILY\"), \", \".join(targets)))\n return ERROR_INVALID_ARG #pylint: disable=E0602\n if cmdParams.get(\"SOC_HW_VERSION\") != None:\n logger.critical( \\\n #pylint: disable=C0301\n \"Cannot use custom SOC_HW_VERSION (%s) when building multiple chipsets (%s)\" \\\n % (cmdParams.get(\"SOC_HW_VERSION\"), \", \".join(targets)))\n return ERROR_INVALID_ARG #pylint: disable=E0602\n if cmdParams.get(\"SOC_VERS\") != None:\n logger.critical( \\\n #pylint: disable=C0301\n \"Cannot use custom SOC_VERS (%s) when building multiple chipsets (%s)\" \\\n % (cmdParams.get(\"SOC_VERS\"), \", \".join(targets)))\n return ERROR_INVALID_ARG #pylint: disable=E0602\n\n os.chdir(BUILD_ROOT) # ensure that we start in the BUILD_ROOT directory\n printDiagnosticInfo(branch, targets, options, buildInput, \\\n autoSelectedBranch)\n # The user is relying on some default arguments & auto-detection and may not\n # expect the config that has been chosen. Pause to give them a chance to\n # identify any errors and cancel the build.\n if (autoSelectedBranch or autoSelectedTarget) and not FAST_BUILD:\n time.sleep(LONG_DELAY)\n\n # create the manifest after the delay (in case the user aborts)\n if createManifestIfNeeded(branch):\n logger.debug(\"Generated updated manifest file\")\n\n # TODO - add logic forbidding combinations of some flags, such as clean +\n # recompile only or cleanpack + clean-and-build\n retCode = ERROR_OK #pylint: disable=E0602\n loadableParamNames = [\n \"BUILD_ASIC\",\n \"BUILD_ID\",\n \"BUILD_VER\",\n \"CHIPSET\", # handles the translation from target -> chipset\n \"HAL_PLATFORM\",\n \"MSM_ID\",\n \"TARGET_FAMILY\",\n \"SOC_HW_VERSION\",\n \"SOC_VERS\",\n ]\n\n # pylint: disable-msg=E0602\n assert not KEEP_GOING or ERROR_OK == 0, \\\n \"BUG: Error accumulator requires ERROR_OK == 0\"\n errorAccumulator = ERROR_OK\n # pylint: enable-msg=E0602\n\n for target in targets:\n # Each target's config was already validated above\n targetConfig = branchConfig[\"targets\"][target]\n targetParams = dict(cmdParams) # shallow copy to preserve the original\n targetArgs = list(DEFAULT_ARGS) # shallow copy\n targetArgs.extend(buildInput[\"args\"])\n\n for paramName in loadableParamNames:\n if targetParams.get(paramName) == None:\n value = targetConfig[paramName.lower()]\n logger.debug(\"%s = %s\" % (paramName, value))\n targetParams[paramName] = value\n\n printParams(targetParams, \"build\")\n\n # An empty list evaluates to false; fileList becomes None in this case\n # TODO - need to ensure that all dependencies are also added & that they\n # are in an acceptable order, although since SCons handles most of this,\n # it is a minor problem for now\n fileList = buildInput[\"files\"] or None\n\n if options.test_mode:\n if cleanBuild or options.recompile_only or options.clean_pack \\\n or not 
makeBuild:\n logger.error(\"Test mode can only be used with a normal build\")\n return ERROR_INVALID_ARG #pylint: disable=E0602\n # force full artifact verification\n FORBID_UNDOCUMENTED_ARTIFACTS = True\n\n if not FAST_BUILD:\n time.sleep(MEDIUM_DELAY)\n\n if cleanBuild:\n logger.info(\"Cleaning %s for %s\" % (branch, target))\n # must maintain our original args for later, so copy them\n cleanArgs = list(targetArgs)\n cleanArgs.append(CLEAN_FLAG)\n # Don't check artifacts since clean should remove them\n retCode = buildTargetFiles(targetConfig, targetParams, cleanArgs, \\\n fileList = fileList, checkArtifacts = False)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Error encountered when cleaning %s for %s\" \\\n % (branch, target))\n if not KEEP_GOING:\n break\n else:\n errorAccumulator |= retCode\n if not makeBuild:\n continue\n if options.recompile_only:\n logger.info(\"Triggering recompilation only of %s for %s\" \\\n % (branch, target))\n # TODO - does recompilation need to rename the build log too?\n retCode = recompileTargetFiles(targetConfig, \\\n targetParams, targetArgs)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Error encountered in recompilation of %s for %s\" \\\n % (branch, target))\n if not KEEP_GOING:\n break\n else:\n errorAccumulator |= retCode\n continue\n elif options.clean_pack:\n logger.info(\"Triggering cleanpack of %s for %s\" % (branch, target))\n # TODO - does cleanpack need to rename the build log too?\n retCode = cleanpack(branch, targetConfig, targetParams, \\\n targetArgs)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Error encountered in cleanpack of %s for %s\" \\\n % (branch, target))\n if not KEEP_GOING:\n break\n else:\n errorAccumulator |= retCode\n continue\n # Finally, we're ready to make a normal build\n retCode = buildTargetFiles(targetConfig, targetParams, targetArgs, \\\n fileList = fileList, checkArtifacts = True, \\\n mapreport = options.map_report, testMode = options.test_mode)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Error encountered when building %s for %s\" \\\n % (branch, target))\n if not KEEP_GOING:\n break\n else:\n errorAccumulator |= retCode\n completionTime = time.time()\n elapsedTime = completionTime - startTime\n logger.info(\"Build %s\" % (\"completed successfully\" if retCode == 0 \\\n else \"encountered errors\"))\n logger.info(\"Total elapsed time: %d minutes, %d seconds\" % \\\n (elapsedTime // 60, elapsedTime % 60))\n if KEEP_GOING and errorAccumulator != ERROR_OK: # pylint: disable=E0602\n return errorAccumulator\n return retCode\n\ndef createManifestIfNeeded(branch):\n \"\"\"Some files in the build tree rely on trustzone_images/build/manifest.xml\n for the branch. Generate one if it does not exist to prevent a build\n failure. 
Adds a custom flag indicating that the manifest was created\n by build_all.py so that we can re-create it if the branch changes.\"\"\"\n if checkManifest(MANIFEST_FILE, checkCreator = True):\n # manifest exists and is valid as-is\n return False\n # Generate actual XML\n configXml = ElementTree.Element(\"config\")\n imageTreeXml = ElementTree.SubElement(configXml, \"image_tree\")\n nameXml = ElementTree.SubElement(imageTreeXml, \"name\")\n nameXml.text = branch\n revisionXml = ElementTree.SubElement(imageTreeXml, \"revision\")\n revisionXml.text = \"00000\"\n creatorXml = ElementTree.SubElement(configXml, \"creator\")\n creatorXml.set(\"name\", os.path.realpath(__file__))\n # Write results to file\n try:\n outfile = open(MANIFEST_FILE, \"w\")\n except (IOError, OSError) as e:\n logger.warn(\"[%s] failed to write to %s\" % (str(e), MANIFEST_FILE))\n assert os.path.exists(MANIFEST_FILE), \\\n \"Could not create manifest - build will fail if we continue\"\n return False # if it already exists, just hope for the best\n declaration = minidom.Document().toxml()\n reparsedXML = minidom.parseString(ElementTree.tostring(configXml, \"utf-8\"))\n outfile.write(reparsedXML.toprettyxml(indent = \" \")[len(declaration) + 1:])\n outfile.close()\n assert checkManifest(MANIFEST_FILE), \"BUG: created invalid manifest\"\n return True\n\ndef checkManifest(filename, checkCreator = False):\n \"\"\"Returns True if this is a valid manifest XML file, False otherwise. If\n 'checkCreator' is True, also validates whether the file was created by\n this script, returning 'False' (invalid file) if so.\"\"\"\n if not os.path.exists(filename):\n return False\n try:\n tree = ElementTree.parse(filename)\n except ElementTree.ParseError as e:\n logger.warn(\"[%s] failed to parse %s\" % (str(e), filename))\n return False\n root = tree.getroot()\n if checkCreator:\n creator = root.find(\"creator\")\n if creator != None:\n name = creator.get(\"name\")\n if name == os.path.realpath(__file__):\n return False\n imageTree = root.find(\"image_tree\")\n if imageTree == None:\n logger.debug(\"Missing <image_tree>\")\n return False\n name = imageTree.findall(\"name\")\n if not name: # findall() returns a (possibly empty) list, never None\n logger.debug(\"Missing <name> in <image_tree>\")\n return False\n revision = imageTree.findall(\"revision\")\n if not revision: # findall() returns a (possibly empty) list, never None\n logger.debug(\"Missing <revision> in <image_tree>\")\n # missing return is intentional - we don't really care about revision\n return True\n\n
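# --- Editor's note: createManifestIfNeeded() above pretty-prints its XML by
# round-tripping the ElementTree output through minidom and slicing off the
# generated declaration. A self-contained sketch of that round-trip; the
# branch name is a hypothetical placeholder.
from xml.dom import minidom
from xml.etree import ElementTree

configXml = ElementTree.Element("config")
imageTreeXml = ElementTree.SubElement(configXml, "image_tree")
ElementTree.SubElement(imageTreeXml, "name").text = "SAMPLE.BRANCH.0.0"
reparsed = minidom.parseString(ElementTree.tostring(configXml, "utf-8"))
declaration = minidom.Document().toxml()  # '<?xml version="1.0" ?>'
print(reparsed.toprettyxml(indent="  ")[len(declaration) + 1:])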
def setupEnv(config, osType, dump = False):\n \"\"\"\n Sets up the environment variables used by programs\n that are called from this python code. Sets them up\n differently for Windows, Linux and MAC.\n\n \"\"\"\n logger.debug(\"Setting environment variables\")\n sys.stdout.flush()\n\n defaultEnv = config[\"environment\"].get(DEFAULT_OS, {})\n if len(defaultEnv) == 0:\n logger.warn(\"No default environment found!\")\n environmentConfig = config[\"environment\"].get(osType, defaultEnv)\n\n outfile = None\n if dump:\n if isWindows():\n outfile = open(ENVIRONMENT_DUMP_BATCH,\"w\")\n outfile.write(\"@ECHO Environment Variables \\n\")\n else:\n outfile = open(ENVIRONMENT_DUMP_SHELL,\"w\")\n outfile.write(\"#!/bin/bash\\n\")\n\n\n for varName in environmentConfig[\"variables\"]:\n varInfo = environmentConfig[\"variables\"][varName]\n if varInfo[\"export\"]:\n # 'value' must always be present; don't worry about KeyError\n value = varInfo[\"value\"]\n logger.debug(\"Setting %s=%s\" % (varName, value))\n os.environ[varName] = value\n if outfile:\n if isWindows():\n outfile.write(\"SET %s=%s\\n\" % (varName, value))\n else:\n outfile.write(\"export %s=%s\\n\" % (varName, value))\n if outfile:\n outfile.close()\n\ndef checkEnv(osType):\n \"\"\"Checks for the presence of mandatory build tools and environment\n variables. Returns an error if a needed tool or env var is missing.\"\"\"\n logV(\"%s environment appears ok\" % osType)\n return ERROR_OK #pylint: disable=E0602\n\ndef getOsType():\n \"\"\"\n Determines the OS type.\n \"\"\"\n return platform.system()\n\ndef isWindows():\n \"\"\"Returns True if the system is a Windows variant & False otherwise\"\"\"\n return \"win\" in getOsType().lower()\n\ndef getCpuCount():\n \"\"\"Returns the number of available CPUs in the current system\"\"\"\n cpuCount = 1\n if hasattr(os, \"sysconf\") and \\\n os.sysconf_names.has_key(\"SC_NPROCESSORS_ONLN\"):\n # Unix\n cpuCount = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n elif os.environ.has_key(\"NUMBER_OF_PROCESSORS\"):\n # Windows\n cpuCount = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n logger.debug(\"Detected %d CPUs\" % cpuCount)\n return cpuCount\n\ndef detectAndLogBranch(config):\n \"\"\"Attempts to auto-detect the branch & logs the result\"\"\"\n branch = detectBranch(config)\n if branch:\n logger.debug(\"Auto-detected branch '%s'\" % branch)\n return branch\n\ndef detectBranch(config):\n \"\"\"Attempts to auto-detect the current branch\"\"\"\n vceFolder = os.path.join(PACKAGE_ROOT, \"vce\")\n if os.path.exists(vceFolder):\n branch = detectVceBranch(vceFolder, config)\n if branch != None:\n return branch\n #if os.path.exists(repoFolder):\n # We could not find any metadata indicating the branch, so attempt to get it\n # directly from the root folder's name, as this is our 'best guess'\n branchOptions = list(config.keys())\n # sort our potential branches (with aliases mixed in) by descending length\n # so that we check for longest match first\n branchOptions.sort(key = len, reverse = True)\n dirName = os.path.basename(os.path.normpath(PACKAGE_ROOT))\n for branch in branchOptions:\n if branch in dirName:\n logger.debug(\"Guessed branch '%s' from path name '%s'\" \\\n % (branch, dirName))\n return branch\n return None\n\n
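# --- Editor's note: detectBranch() above falls back to guessing the branch
# from the package directory name, checking longest candidates first so the
# most specific name wins when several are substrings. A standalone sketch of
# that longest-match rule; the branch names below are hypothetical.
def guess_branch(dir_name, branch_options):
    for branch in sorted(branch_options, key=len, reverse=True):
        if branch in dir_name:
            return branch
    return None

print(guess_branch("TZ.XF.5.0.1_builds", ["TZ.XF.5.0", "TZ.XF.5.0.1"]))  # -> TZ.XF.5.0.1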
True)\n # check the crm_client_log file first as it is usually most accurate\n crmLog = os.path.join(vceDir, CRM_CLIENT_LOG)\n branch = parseCrmClientLog(crmLog, productionBranches)\n if branch != None:\n return branch\n # we still haven't found a branch so try the crm_info file\n crmInfo = os.path.join(vceDir, CRM_INFO_FILE)\n branch = parseCrmInfoFile(crmInfo, productionBranches)\n return branch\n\ndef parseCrmClientLog(path, branches):\n \"\"\"Attempts to read the branch from a CRM client log file\"\"\"\n crmLogLines = readLines(path) or []\n branch = None\n for line in crmLogLines:\n line = line.strip()\n if not line.startswith(\"+\"):\n continue\n logger.debug(\"Candidate line for branch: %s\" % line)\n branchCandidates = matchBranches(line, branches)\n if len(branchCandidates) == 0:\n continue\n branch = branchCandidates[-1]\n logger.debug(\"Detected branch %s in crm log\" % branch)\n break\n return branch\n\ndef parseCrmInfoFile(path, branches):\n \"\"\"Attempts to read the branch from a CRM info file\"\"\"\n crmInfoLines = readLines(path)\n if crmInfoLines == None:\n return None\n plfCandidate = None\n crmCandidate = None\n imageCandidate = None\n for line in crmInfoLines:\n line = line.strip()\n if \"=\" not in line:\n continue\n parts = line.split(\"=\")\n potentialBranch = \"=\".join(parts[1:])\n if parts[0].startswith(\"PLF_USED\") and \"=\" in line:\n branchCandidates = matchBranches(potentialBranch, branches)\n if len(branchCandidates) == 0:\n continue\n plfCandidate = branchCandidates[-1]\n logger.debug(\"Detected branch %s in crm info plf\" % plfCandidate)\n elif parts[0].startswith(\"CRM_Append\") and \"=\" in line:\n branchCandidates = matchBranches(potentialBranch, branches)\n if len(branchCandidates) == 0:\n continue\n crmCandidate = branchCandidates[-1]\n logger.debug(\"Detected branch %s in crm info append\" % crmCandidate)\n elif parts[0].startswith(\"Software Image\") and \"=\" in line:\n if potentialBranch not in branches:\n logger.debug( \\\n #pylint: disable=C0301\n \"Found SI %s in %s, but it does not match any production branch\" \\\n % (potentialBranch, CRM_INFO_FILE))\n continue\n if imageCandidate != None:\n logger.warn( \\\n #pylint: disable=C0301\n \"Found multiple Software Images in %s; replacing %s with %s\" \\\n % (CRM_INFO_FILE, imageCandidate, potentialBranch))\n else:\n logger.debug(\"Found SI %s in crm info\" % potentialBranch)\n imageCandidate = potentialBranch\n return plfCandidate or crmCandidate or imageCandidate\n\ndef matchBranches(line, branchList):\n \"\"\"Matches text against a list of known branches\"\"\"\n ret = []\n for branch in branchList:\n # there should not be many candidate lines, so on-the-fly regex\n # compilation should never be a notable performance issue\n branchRegex = re.compile(\"/%s_\" % branch)\n match = branchRegex.search(line)\n if match == None:\n continue\n logger.debug(\"Detected branch %s in %s\" % (branch, CRM_CLIENT_LOG))\n ret.append(branch)\n if len(ret) > 1:\n logger.debug(\"Multiple branch candidates found in line: %s\" \\\n % \", \".join(ret))\n return ret\n\ndef readLines(filename):\n \"\"\"Reads text from a file as a list of lines\"\"\"\n if not os.path.isfile(filename):\n logger.warn(\"'%s' is not a valid file\" % filename)\n return None\n try:\n infile = open(filename)\n except (IOError, OSError) as e:\n logger.error(\"[%s] could not read %s\" % (str(e), filename))\n return None\n ret = infile.readlines()\n infile.close()\n return ret\n\ndef renameLog(dirPath, logfilename):\n \"\"\"\n Renames the 
build-log.txt file to build-log-[0-9]+.txt where\n the number is one larger than the highest existing log number.\n \"\"\"\n filename = logfilename + \".txt\"\n buildLog = os.path.join(LOGFILE_ROOT, filename)\n if not os.path.exists(buildLog):\n logger.debug(\"No previous build log found @%s\" % buildLog)\n return\n # glob will handle the unix-path '/' here; don't need to use os.path.join\n dirList = glob.glob(dirPath + \"/%s-*[0-9].txt\"%logfilename)\n if len(dirList) > 0:\n dirList.sort()\n lastLog = dirList[-1]\n logRegex = re.compile(\"%s-([0-9]+)\\\\.txt\"%re.escape(logfilename))\n match = logRegex.search(lastLog)\n # this should always match, based on our glob string\n assert match != None, \"BUG: %s did not handle %s correctly\" % \\\n (__file__, lastLog)\n logNum = int(match.group(1))\n logNum += 1\n newLogName = ((\"%s-\" + str(logNum).zfill(3) + \".txt\")%logfilename)\n else:\n newLogName = \"%s-001.txt\"% logfilename\n logger.info(\"Log: %s\" % newLogName)\n try:\n # probe that the old log is readable before attempting the rename\n fin = open(buildLog, 'rb')\n fin.close()\n os.rename(buildLog, os.path.join(LOGFILE_ROOT, newLogName))\n except (IOError, OSError) as e:\n logger.error(\"[%s] could not rotate %s\" % (str(e), buildLog))\n\ndef updateLog():\n \"\"\"Updates the unified build log if a build-log.txt file exists\"\"\"\n sconsLog = os.path.join(LOGFILE_ROOT, BUILD_LOG_PREFIX + \".txt\")\n fullLog = os.path.join(LOGFILE_ROOT, UNIFIED_LOG + \".txt\")\n\n if(os.path.isfile(sconsLog) and os.path.isfile(fullLog)):\n concatLogs(sconsLog, fullLog)\n\ndef generateReport(reportFiles):\n \"\"\"Generates a mapping report for the given files\"\"\"\n for f in reportFiles:\n filePath = os.path.join(BUILD_ROOT, f)\n reportCmd = \"python %s %s\" % (REPORT_GENERATOR_SCRIPT, filePath)\n logger.debug(\"Mapreport command: %s\" % reportCmd)\n retCode = call(reportCmd, shell = True)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"failure occurred while generating '%s' report\" % f)\n return retCode\n return ERROR_OK #pylint: disable=E0602\n\n# takes string(filename), dict(params), optional list(args)\ndef buildCmd(filename, params, args = None, mapreportFiles = None):\n \"\"\"Calls the build.cmd or build.sh batch/shell file.\"\"\"\n args = args or []\n mapreportFiles = mapreportFiles or []\n assert params.get(\"CHIPSET\") != None, \"params must contain CHIPSET\"\n buildScript = \"build\" + (\".cmd\" if isWindows() else \".sh\")\n buildScriptPath = os.path.join(getSConsDir(), SCONS_BUILD_PATH, buildScript)\n buildCommandParts = [ buildScriptPath ]\n buildCommandParts.extend(args)\n for param in params:\n buildCommandParts.append(\"%s=%s\" % (param, params[param]))\n buildCommandParts.append(filename) # finally, add the target file\n buildCommand = \" \".join(buildCommandParts)\n # the target.scons script is located within build/ms, so cd there\n buildDir = os.path.join(os.getcwd(), BUILD_PATH)\n if not os.path.exists(buildDir):\n logger.error(\"%s does not exist; cannot trigger build\" % buildDir)\n return ERROR_BAD_PATH #pylint: disable=E0602\n curDir = os.getcwd()\n try:\n os.chdir(buildDir)\n# logger.debug(\"pwd = %s\" % os.getcwd())\n logger.debug(\"build command: %s\" % buildCommand)\n logger.info(\"\\n\\nStarting to build %s ...\\n\\n\" % filename)\n sys.stdout.flush() # ensure that all messages are printed before start\n if not FAST_BUILD:\n # give time for the build command to be seen by the user\n time.sleep(SHORT_DELAY)\n retCode = call(buildCommand, shell = True)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"build 
command encountered errors: %s\" % buildCommand)\n return retCode\n if len(mapreportFiles) == 0:\n return ERROR_OK #pylint: disable=E0602\n # if mapreportFiles has elements, we have 1+ report file(s) to populate\n retCode = generateReport(mapreportFiles)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"error during mapreport generation\")\n finally:\n os.chdir(curDir)\n return retCode\n\ndef recompileTargetFiles(config, params, args):\n \"\"\"Triggers recompilation of key binaries from binaries, not source\"\"\"\n logger.info(\"\\n\\nRecompile %s ...\\n\\n\" % os.getcwd())\n buildId = params.get(\"BUILD_ID\")\n assert buildId != None, \"params must contain BUILD_ID\"\n targetFiles = config[\"files\"]\n filesToRebuild = selectFiles(config, recompile = True)\n for f in filesToRebuild:\n artifacts = targetFiles[f][\"artifacts\"]\n for artifact in artifacts:\n artifactPath = getBinDir(buildId, artifact)\n logger.info(\"Removing artifact: %s\" % artifactPath)\n if not os.path.isfile(artifactPath):\n logger.warn(\"Cannot remove %s (artifact not found in bin dir)\" \\\n % artifact)\n continue\n try:\n os.remove(artifactPath)\n except (IOError, OSError) as e:\n logger.error(\"[%s] failed to remove artifact: %s\" \\\n % (str(e), artifactPath))\n return ERROR_SYS_ERR #pylint: disable=E0602\n retCode = buildTargetFiles(config, params, args, \\\n fileList = filesToRebuild, checkArtifacts = True, \\\n ignoreParams = True)\n return retCode\n\ndef cleanpack(branch, config, params, args):\n \"\"\"Creates HY11 & HK11 builds from binaries\"\"\"\n logger.info(\"\\n\\nIn cleanpack ...\\n\\n\")\n assert cleanpack_tz != None, \"cleanpack module not available\"\n chipset = params.get(\"CHIPSET\")\n assert chipset != None, \"params must contain CHIPSET\"\n # TODO - consider adding logic to detect if previous build artifacts are\n # present and squash them to ensure that nothing improper gets packed into\n # the build\n retCode = 0\n for i in range(3):\n stepArgs = list(args) # copy original args so we don't overwrite them\n if i == 0 or i == 2:\n # first and last build are normal\n stepName = \"build\" if i == 0 else \"pack\"\n elif i == 1:\n # second build strips\n stepArgs.extend(CLEANPACK_ARGS)\n stepName = \"strip\"\n fileList = selectFiles(config, strip = True, firstBuild = (i == 0))\n # check artifacts only for first & third builds (i = 0 & i = 2)\n # 'strip' omits files that do not apply to cleanpack builds, but it is\n # entirely redundant as we have already explicitly selected our files\n retCode = buildTargetFiles(config, params, stepArgs, fileList, \\\n checkArtifacts = (i != 1), ignoreParams = True)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"build failure detected in %s step\" % stepName)\n return retCode\n if i == 0:\n retCode = copyOutput(TEMP_DIR)\n if retCode != ERROR_OK: #pylint: disable=E0602\n return retCode\n retCode = cleanpack_tz.cleanpack(branch, chipset, PACKAGE_ROOT)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Failed in cleanpack_tz\")\n return retCode\n os.chdir(os.path.join(HY11_DIR, \"trustzone_images\"))\n retCode = recompileTargetFiles(config, params, args)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Failed to recompile HY11\")\n return retCode\n os.chdir(os.path.join(HK11_DIR, \"trustzone_images\"))\n retCode = recompileTargetFiles(config, params, args)\n if retCode != ERROR_OK: #pylint: disable=E0602\n logger.error(\"Failed to recompile HK11\")\n return retCode\n return ERROR_OK #pylint: 
disable=E0602\n\ndef copyOutput(dest):\n \"\"\"Copies some essential files for cleanpack to HY11 & HK11 folders\"\"\"\n for copyTarget in CLEANPACK_COPY_TARGETS:\n srcPath = os.path.join(PACKAGE_ROOT, copyTarget)\n destPath = os.path.join(dest, copyTarget)\n if os.path.exists(destPath):\n try:\n if os.path.isdir(destPath):\n shutil.rmtree(destPath)\n else:\n os.remove(destPath)\n except (IOError, OSError) as e:\n logger.error(\"[%s] failed to remove existing dir: %s\" \\\n % (str(e), destPath))\n return ERROR_SYS_ERR #pylint: disable=E0602\n logger.info(\"Copy %s -> %s\" % (srcPath, destPath))\n try:\n if os.path.isdir(srcPath):\n shutil.copytree(srcPath, destPath, symlinks = True)\n else:\n shutil.copy2(srcPath, destPath)\n except (IOError, OSError) as e:\n logger.error(\"[%s] failed to copy %s -> %s\" \\\n % (str(e), srcPath, destPath))\n return ERROR_SYS_ERR #pylint: disable=E0602\n return ERROR_OK #pylint: disable=E0602\n\ndef selectFiles(config, strip = False, recompile = False, firstBuild = True):\n \"\"\"Determines which files to build based on the input arguments.\n Ignores disabled files.\"\"\"\n targetFiles = config[\"files\"]\n ret = []\n # use the target-specific file list for this target\n for f in targetFiles:\n fileMeta = targetFiles[f]\n if fileMeta[\"disable\"]:\n continue # skip disabled files\n elif strip and fileMeta[\"strip\"]:\n continue # omit stripped files\n elif (not firstBuild) and fileMeta[\"build-once\"]:\n continue # don't build 'build-once' files after the first time\n elif recompile and not fileMeta[\"recompile\"]:\n continue # skip files that should not be recompiled\n ret.append(f)\n return ret\n\n# It is the caller's responsibility what files are passed in. If no file list is\n# passed, all applicable files will be automatically selected from the current\n# target's config. 
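(For a concrete\n# instance of this, see cleanpack() above, which pre-selects its file list with\n# selectFiles(config, strip = True, ...) and hands the result in explicitly.) 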
For features like 'strip', selectFiles() should be explicitly\n# called by the calling function & have its result passed as 'fileList'.\ndef buildTargetFiles(config, params, args, fileList = None, \\\n checkArtifacts = True, ignoreParams = False, mapreport = False, \\\n testMode = False):\n \"\"\"Main build function\"\"\"\n # TODO - @redesign to reduce arguments & local vars\n #pylint: disable=R0913, R0914\n retCode = ERROR_OK #pylint: disable=E0602\n\n # Preserve the last log (if one exists) by renaming it from \"build-log.txt\"\n # to \"build-log-[0-9]+.txt\"\n renameLog(LOGFILE_ROOT, BUILD_LOG_PREFIX)\n\n if fileList == None:\n fileList = selectFiles(config)\n else:\n # Some of the given files may not be valid (listed) for this chipset.\n # Remove these to avoid unexpected build failures.\n fileList = [ alias for alias in fileList if alias in config[\"files\"] ]\n\n fileList.sort()\n\n try:\n retCode = builderLoop(fileList, config[\"files\"], params, \\\n args, checkArtifacts, ignoreParams, mapreport, \\\n testMode)\n except Exception: #pylint: disable=W0703\n traceback.print_exc()\n updateLog() # always capture last log, no matter what happened\n return ERROR_UNKNOWN #pylint: disable=E0602\n updateLog()\n return retCode\n\ndef concatLogs(file1, file2):\n '''Function to concatenate build logs into the Logfile'''\n\n try:\n with open(file2, 'a') as outfile:\n with open(file1) as infile:\n for lines in infile:\n outfile.write(lines)\n except IOError as e:\n logger.error(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n\n\n# TODO: @redesign to reduce arguments, local vars, branches, & lines\n# pylint: disable-msg=R0912,R0913,R0914,R0915\ndef builderLoop(fileList, fileMetadata, params, args, \\\n checkArtifacts, ignoreParams, mapreport, testMode):\n \"\"\"Main build worker function\"\"\"\n if params.get(\"CHIPSET\") == None:\n logger.error(\"params must contain CHIPSET\")\n return ERROR_INVALID_ARG # pylint: disable=E0602\n\n if params.get(\"BUILD_ID\") == None:\n logger.error(\"params must contain BUILD_ID\")\n return ERROR_INVALID_ARG # pylint: disable=E0602\n\n ret = ERROR_OK # pylint: disable=E0602\n if len(fileList) == 0:\n logger.warn(\"Nothing to build for {} ({}) - skipping\".format( \\\n params[\"CHIPSET\"], params[\"BUILD_ID\"]))\n return ret\n\n artifactsDir = getBinDir(params[\"BUILD_ID\"])\n\n # build up list of aliases that need to compile with/without USES_NO_CP\n # then make a single SCons invocation for each unified list\n #\n # look at fileMeta['internal-test'] to determine which list to associate the\n # image with\n #\n normal_images = []\n test_images = []\n for fileItem in fileList:\n if fileMetadata[fileItem][\"internal-test\"]:\n test_images.append(fileItem)\n else:\n normal_images.append(fileItem)\n\n errorAccumulator = ERROR_OK # pylint: disable=E0602\n while True: # enables goto-like behavior via 'break'\n if testMode and os.path.exists(artifactsDir):\n logger.debug(\"Removing %s\" % artifactsDir)\n try:\n shutil.rmtree(artifactsDir)\n except (IOError, OSError) as e:\n logger.error( \\\n \"Failed to remove artifacts dir in test mode: %s\" \\\n % artifactsDir)\n ret = ERROR_SYS_ERR #pylint: disable=E0602\n break\n\n fileParams = dict(params)\n fileArgs = list(args)\n if KEEP_GOING:\n fileArgs.append(\"-k\")\n\n logger.debug(\"Params: %s\" % \", \".join( \\\n [ \"%s=%s\" % (key, fileParams[key]) for key in fileParams]))\n logger.debug(\"Args: %s\" % \", \".join(fileArgs))\n\n updateLog()\n\n expectedArtifacts = set([])\n try:\n initialArtifactSet = 
getArtifacts(artifactsDir)\n except OSError:\n ret = ERROR_BAD_PATH # pylint: disable=E0602\n break\n\n if len(normal_images) > 0:\n logger.info(\"Building {}\".format(\" \".join(normal_images)))\n mapreportFiles = []\n if mapreport:\n for imageName in normal_images:\n mapreportFiles.extend( \\\n list(fileMetadata[imageName][\"mapreport\"]))\n ret = buildCmd(\" \".join(normal_images), fileParams, fileArgs, \\\n mapreportFiles = mapreportFiles)\n if ret != ERROR_OK: #pylint: disable=E0602\n logger.error(\"error %s while building %s\" \\\n % (strerr(ret), \" \".join(normal_images)))\n if not KEEP_GOING:\n break\n else:\n errorAccumulator |= ret\n\n if checkArtifacts:\n try:\n validateArtifacts(artifactsDir, normal_images, \\\n fileMetadata, expectedArtifacts)\n except ArtifactNotFoundException as e:\n logger.error(str(e))\n if not KEEP_GOING:\n ret = ERROR_INVALID_ARTIFACT #pylint: disable=E0602\n break\n else:\n errorAccumulator |= ERROR_INVALID_ARTIFACT # pylint: disable=E0602,C0301\n except (IOError, OSError) as e:\n logger.debug(str(e))\n logger.error( \\\n # pylint: disable=C0301\n \"Artifact dir {} does not exist or is inaccessible\".format( \\\n artifactsDir))\n ret = ERROR_BAD_PATH #pylint: disable=E0602\n break\n\n if len(test_images) > 0:\n logger.info(\"Building {}\".format(\" \".join(test_images)))\n # if not ignoreParams: # must not add this for --recompile, etc\n # fileParams[\"USES_FLAGS\"] = \"USES_NO_CP\"\n mapreportFiles = []\n if mapreport:\n for imageName in test_images:\n mapreportFiles.extend( \\\n list(fileMetadata[imageName][\"mapreport\"]))\n ret = buildCmd(\" \".join(test_images), fileParams, fileArgs, \\\n mapreportFiles = mapreportFiles)\n if ret != ERROR_OK: #pylint: disable=E0602\n logger.error(\"error %s while building %s\" \\\n % (strerr(ret), \" \".join(test_images)))\n if not KEEP_GOING:\n break\n else:\n errorAccumulator |= ret\n\n if checkArtifacts:\n try:\n validateArtifacts(artifactsDir, test_images, fileMetadata, \\\n expectedArtifacts)\n except ArtifactNotFoundException as e:\n logger.error(str(e))\n if not KEEP_GOING:\n ret = ERROR_INVALID_ARTIFACT #pylint: disable=E0602\n break\n else:\n errorAccumulator |= ERROR_INVALID_ARTIFACT # pylint: disable=E0602,C0301\n except (IOError, OSError) as e:\n logger.debug(str(e))\n logger.error( \\\n # pylint: disable=C0301\n \"Artifact dir {} does not exist or is inaccessible\".format( \\\n artifactsDir))\n ret = ERROR_BAD_PATH #pylint: disable=E0602\n break\n\n try:\n artifactSet = getArtifacts(artifactsDir)\n except OSError:\n ret = ERROR_BAD_PATH # pylint: disable=E0602\n break\n\n # Check for extra artifacts that were just generated but not expected\n newArtifacts = artifactSet.difference(initialArtifactSet)\n unexpectedArtifacts = newArtifacts.difference(expectedArtifacts)\n\n if checkArtifacts and len(unexpectedArtifacts) > 0:\n extraList = list(unexpectedArtifacts)\n extraList.sort()\n msg = \"Extra artifacts: {}\".format(\", \".join(extraList))\n if FORBID_UNDOCUMENTED_ARTIFACTS:\n logger.error(msg)\n ret = ERROR_INVALID_ARTIFACT #pylint: disable=E0602\n break\n else:\n logger.warn(msg)\n\n break # always break\n\n if KEEP_GOING and errorAccumulator != ERROR_OK: # pylint: disable=E0602\n return errorAccumulator\n\n return ret\n# pylint: enable-msg=R0912,R0913,R0914,R0915\n\ndef getArtifacts(artifactsDir, mustExist = False):\n \"\"\"Returns a set of artifacts in the given dir, if it exists. 
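A hypothetical\n call, assuming a bin dir holding two images: getArtifacts(getBinDir(\"PROD\"))\n might return set([\"tz.mbn\", \"hyp.mbn\"]). 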
If\n 'mustExist' is True (default False), raises IOError if artifactsDir is\n non-existent.\"\"\"\n artifactSet = set([])\n\n # if these checks fails, the resulting IOError/OSError should be caught by\n # the caller\n if not os.path.exists(artifactsDir):\n if mustExist:\n raise IOError(\"Artifact output dir '{}' does not exist\".format( \\\n artifactsDir))\n else:\n logger.debug(\"Artifact dir {} does not exist\".format(artifactsDir))\n return artifactSet\n\n try:\n artifactSet = set([ uri for uri in os.listdir(artifactsDir) \\\n if os.path.isfile(os.path.join(artifactsDir, uri)) ])\n except OSError:\n logger.debug(traceback.format_exc())\n logger.error( \\\n \"Artifact dir {} exists but is inaccessible\".format( \\\n artifactsDir))\n raise\n\n return artifactSet\n\ndef validateArtifacts(artifactsDir, imageList, imageMetadata, \\\n expectedArtifacts = None):\n \"\"\"Validates that all expected artifacts are present for each of the SCons\n images in the given 'imageList'. If a non-None 'expectedArtifacts' set is\n provided, also updates it with the expected artifacts for each image.\"\"\"\n if expectedArtifacts == None:\n expectedArtifacts = set([])\n\n # Any IOError/OSError is passed on to the caller\n artifactSet = getArtifacts(artifactsDir, mustExist = True)\n\n # Check for missing artifacts. Note that the set\n # constructed above only contains valid files.\n for imageName in imageList:\n expected = getExpectedArtifacts(imageName, imageMetadata)\n expectedArtifacts.update(expected)\n if len(expected) == 0:\n continue\n\n # We diff against the full artifact set since some of the artifacts\n # might already exist from a previous build.\n missing = expected.difference(artifactSet)\n logger.info(\"Found {} of {} expected artifacts for {}\".format( \\\n len(expected) - len(missing), len(expected), imageName))\n if len(missing) > 0:\n missingList = list(missing)\n missingList.sort()\n raise ArtifactNotFoundException(\"Missing {} artifacts: {}\".format( \\\n imageName, \", \".join(missingList)))\n\ndef getExpectedArtifacts(filename, fileMetadata, dependencyList = None):\n \"\"\"Returns the expected artifacts for a file (current + dependencies)\"\"\"\n dependencyList = dependencyList or []\n if len(dependencyList) == 0:\n logger.debug(\"%s has no listed dependencies\" % filename)\n else:\n logger.debug(\"%s has listed dependencies: %s\" \\\n % (filename, \", \".join(dependencyList)))\n artifacts = set(fileMetadata[filename][\"artifacts\"])\n for dep in dependencyList:\n artifacts.update(set(fileMetadata[dep][\"artifacts\"]))\n return artifacts\n\ndef getBinDir(buildId, artifact = None):\n \"\"\"Gets the bin output folder for the current target\"\"\"\n buildDir = os.path.join(os.getcwd(), BUILD_PATH)\n if artifact == None:\n return os.path.join(buildDir, 'bin', buildId)\n return os.path.join(buildDir, 'bin', buildId, artifact)\n\ndef getDefaultConfig():\n \"\"\"Returns the 'default' config file based on the first candidate in the\n CONFIG_FILES list that exists locally.\"\"\"\n assert len(CONFIG_FILES) > 0, \\\n \"BUG: Must have at least one default config file\"\n for config in CONFIG_FILES:\n if os.path.isfile(config):\n logger.debug(\"build config: {}\".format(config))\n return config\n logger.warn(\"No valid config file found. 
Expecting {}\".format( \\\n CONFIG_FILES[0]))\n return CONFIG_FILES[0]\n\nclass PassthroughOptionParser(optparse.OptionParser):\n \"\"\"Extends OptionParser to support pass-through of unknown args to SCons\"\"\"\n def _process_args(self, largs, rargs, values):\n originalArgs = list(rargs)\n while rargs:\n try:\n logV(\"Remaining args: %s\" % \", \".join(rargs))\n optparse.OptionParser._process_args(self, largs, rargs, values)\n except (optparse.BadOptionError, optparse.AmbiguousOptionError) \\\n as e:\n logV(\"parse error on '%s'\" % e.opt_str)\n # The OptionParser may not return the raw argument (eg '-j4'\n # will be declared invalid and returned as '-j'), so we have to\n # determine which original argument caused the problem & add it\n # to largs instead of e.opt_str. We are guaranteed that\n # _process_args has removed at least 1 item from rargs UNLESS\n # we had a special '=' argument where the value was added as an\n # additional rarg (eg --jobs=4). We check for this corner case\n # first, then decrement if it does not appear to be the case.\n failureIndex = len(originalArgs) - len(rargs) - 1\n paramValue = \"\"\n try:\n # doubles as an implicit\n # \"if failureIndex + 1 < len(originalArgs)\"\n badArg = originalArgs[failureIndex + 1]\n if badArg[0] == \"-\":\n # doubles as an implicit \"if '=' in badArg\"\n paramValue = \"=\".join(badArg.split(\"=\")[1:])\n except IndexError:\n badArg = originalArgs[failureIndex]\n logV(\"paramValue = '%s'; rargs = [ %s ]\" \\\n % (paramValue, \", \".join(rargs)))\n # paramValue will only match rargs[0] if that arg is (a) empty\n # or (b) the value of an invalid <key>=<value> param\n if len(rargs) > 0 and rargs[0] == paramValue:\n logV(\"Pop redundant arg '%s' from args list\" \\\n % rargs.pop(0))\n else:\n badArg = originalArgs[failureIndex]\n logger.debug(\"passthrough arg: %s (parsed as %s)\" \\\n % (badArg, e.opt_str))\n largs.append(badArg)\n\ndef parseArguments():\n \"\"\"Basic argument definition & parsing\"\"\"\n parser = PassthroughOptionParser()\n parser.add_option(\"-b\", \"--branch\", \\\n action = \"store\", \\\n type = \"string\", \\\n dest = \"branch\", \\\n help = \"Build this branch\")\n parser.add_option(\"--cfg\", \"--config-file\", \\\n action = \"store\", \\\n type = \"string\", \\\n dest = \"config_file\", \\\n default = getDefaultConfig(), \\\n help = \"Specify the build config xml file\")\n parser.add_option(\"-c\", \"--clean\", \\\n action = \"store_true\", \\\n dest = \"clean\", \\\n default = False, \\\n help = \"Clean the build ONLY\")\n parser.add_option(\"--cleanpack\", \\\n action = \"store_true\", \\\n dest = \"clean_pack\", \\\n default = False, \\\n help=\"Clean and Pack\")\n parser.add_option(\"--cnb\", \"--clean-and-build\", \\\n action = \"store_true\", \\\n dest = \"clean_build\", \\\n default = False, \\\n help = \"Clean and Build\")\n parser.add_option(\"--cbt\", \"--custom-build-targets\", \\\n action = \"store\", \\\n type = \"string\", \\\n dest = \"custom_build_targets\", \\\n #pylint: disable=C0301\n help = \"Request build of the given SCons target(s) / alias(es), even if they don't appear in build_config.xml\")\n parser.add_option(\"-d\", \"--default\", \\\n action = \"store_true\", \\\n default = False, \\\n dest = \"use_default_target\", \\\n help = \"Build only the branch's default chipset\")\n parser.add_option(\"--detect-branch\", \\\n action = \"store_true\", \\\n default = False, \\\n dest = \"detect_branch\", \\\n help = \"Try to detect current branch, print, & exit\")\n 
parser.add_option(\"--dumpenv\", \\\n action = \"store_true\", \\\n default = False, \\\n dest = \"dump_environment\", \\\n help = \"Dump the environment to a shell script\")\n parser.add_option(\"--enable_cc\", \\\n action=\"store_true\", \\\n dest=\"code_coverage\", \\\n default=False, \\\n #pylint: disable=C0301\n help=\"Build a code coverage enabled image.This is applicable for tz tz_with_test and tztestexec image only for now.\")\n parser.add_option(\"--fast\", \\\n action = \"store_true\", \\\n default = False,\n dest = \"fast_build\", \\\n help = \"Suppress scripted prompts & delays\")\n parser.add_option(\"-k\", \"--keep-going\", \\\n action = \"store_true\", \\\n default = False, \\\n dest = \"keep_going\", \\\n # pylint: disable=C0301\n help = \"Attempt to keep going despite build errors. Still (eventually) returns a non-zero exit code on failure.\"\"\")\n parser.add_option(\"--list-targets\", \\\n action = \"store_true\", \\\n default = False, \\\n dest = \"list_targets\", \\\n help = \"List supported targets for the current branch & exit\")\n parser.add_option(\"-m\", \"--map-report\", \\\n action = \"store_true\", \\\n dest = \"map_report\", \\\n default = DEFAULT_MAPREPORT, \\\n help = \"Mapreport\")\n parser.add_option(\"-r\", \"--recompile\", \\\n action = \"store_true\", \\\n dest = \"recompile_only\", \\\n default = False, \\\n help = \"Recompile only\")\n parser.add_option(\"--sa\", \"--static-analysis\",\n action = \"store_true\", \\\n dest = \"enable_static_analysis\", \\\n default = False, \\\n help = \"Enable KLOCKWORK buildspec generation & basic SA checks\")\n parser.add_option(\"--scons-args\", \"--args\", \\\n action = \"store\", \\\n type = \"string\", \\\n dest = \"scons_args\", \\\n #pylint: disable=C0301\n help = \"Explicitly specify custom arguments to be passed down to SCons.\")\n parser.add_option(\"--test-mode\", \\\n action = \"store_true\", \\\n dest = \"test_mode\", \\\n default = False, \\\n #pylint: disable=C0301\n help = \"Remove the current target's entire bin dir before building each file in order to verify that dependencies & artifacts are exactly as expected.\")\n parser.add_option(\"-v\", \"--verbose\", \\\n action = \"store_true\", \\\n dest = \"verbose\", \\\n default = False, \\\n help = \"Verbose output\")\n return parser.parse_args()\n\ndef parseExtraArguments(args, branchConfig):\n \"\"\"Performs additional parsing of command line args like requested files\"\"\"\n ret = {\n \"args\" : [],\n \"files\" : [],\n \"params\" : {},\n }\n # build a list of all <alias>es from all targets to validate what is\n # considered a 'file' vs a pass-through argument. 
Acceptable files will be\n # validated on each target once targets are determined, but this parsing\n # can include CHIPSET=<chipset>, so we cannot perform this validation yet\n allFiles = set([])\n for target in branchConfig[\"targets\"]:\n targetFiles = set(branchConfig[\"targets\"][target][\"files\"].keys())\n allFiles.update(targetFiles)\n for arg in args:\n if len(arg.strip()) == 0:\n logger.warn(\"Received empty arg '%s'\" % arg)\n continue\n logV(\"Parse arg '%s'\" % arg)\n if arg in allFiles:\n logger.debug(\"Found supported file '%s' in command line args\" % arg)\n if arg in ret[\"files\"]:\n logger.critical(\"file '%s' is provided twice\" % arg)\n sys.exit(ERROR_INVALID_ARG) #pylint: disable=E0602\n ret[\"files\"].append(arg)\n elif arg.startswith(\"-\"):\n # arguments must start with - or --\n # two-part arguments like \"-j 1\" are no longer supported since users\n # can too-easily pass untracked SCons images (eg skeleton) as extra\n # arguments, causing unexpected/confusing build behavior\n # Any custom command-line arguments to SCons can be passed with\n # --scons-args=\"<extra args>\"\n logger.debug( \\\n \"Passing '%s' from command line directly to build script\" \\\n % arg)\n ret[\"args\"].append(arg)\n elif \"=\" in arg:\n # param notation; ignores any argument prefixed by '-' or '--' since\n # these are likely passthrough arguments\n paramParts = arg.split(\"=\")\n key = paramParts[0]\n value = \"=\".join(paramParts[1:])\n logger.debug(\"Found param %s = %s in command line args\" \\\n % (key, value))\n existingParam = ret[\"params\"].get(key)\n if existingParam != None:\n logger.critical(\"param '%s' is provided twice\" % key)\n sys.exit(ERROR_INVALID_ARG) #pylint: disable=E0602\n ret[\"params\"][key] = value\n else:\n msg = \"\"\"Invalid argument: '{0}'\n\nThe given argument is not recognized as a known SCons image or command-line\nparameter. If it was intended to be a SCons image, please add it to the build\nconfig or use:\n--cbt=\"{0}\"\n\nIf it was intended to be a whole or partial SCons command-line argument, use:\n--scons-args=\"{0}\".\"\"\".format(arg)\n logger.critical(msg)\n sys.exit(ERROR_INVALID_ARG) #pylint: disable=E0602\n return ret\n\ndef addCustomBuildTargets(options, branchConfig, buildInput):\n \"\"\"Adds custom build targets to the list of command line files. Creates\n dummy references for the build targets (as if they had entries within\n build_config.xml) if they are not already present. Also enforces strict\n dependency in the order that custom build targets are given. 
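For\n example, a hypothetical --cbt=\"devcfg hyp\" gives the dummy 'hyp' entry a\n dependency on 'devcfg', so the two always build in that order. 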
Applies to\n all chipsets, although only the specified chipset(s) will be built.\"\"\"\n if not options.custom_build_targets:\n return\n logger.debug(\"Custom build targets: %s\" % options.custom_build_targets)\n customTargets = options.custom_build_targets.split()\n for chipset in branchConfig[\"targets\"]:\n chipsetConfig = branchConfig[\"targets\"][chipset]\n for i in range(len(customTargets)):\n buildTarget = customTargets[i]\n if buildTarget in chipsetConfig[\"files\"]:\n continue # an entry is already present for this chipset\n dummyEntry = BuildConfigParser.fileTemplate()\n # make this entry explicitly dependent on all previous entries to\n # ensure that we never violate an implicit dependency\n dummyEntry[\"dependencies\"] = customTargets[:i]\n chipsetConfig[\"files\"][buildTarget] = dummyEntry.dict()\n buildInput[\"files\"].extend([ buildTarget \\\n for buildTarget in options.custom_build_targets.split() \\\n if buildTarget not in buildInput[\"files\"] ])\n\ndef printParams(params, title = None, exclude = None):\n \"\"\"Prints the given params (KEY=VALUE)\"\"\"\n exclude = exclude or []\n if title == None:\n title = \"Parameters:\"\n else:\n title = \"%s parameters:\" % title.capitalize()\n logger.info(title)\n # sorted() returns a list copy, so this also works where keys() is a view\n keys = sorted(params.keys())\n for key in keys:\n if key in exclude:\n continue\n logger.info(\"%s: %s\" % (key, params[key]))\n sys.stdout.flush()\n\ndef getSConsDir():\n \"\"\"Returns the current SCons dir\"\"\"\n return os.path.join(os.getcwd(), TOOLS_SCONS_PATH)\n\ndef printDiagnosticInfo(branch, targets, options, buildInput, \\\n branchWarning = False):\n \"\"\"Prints essential build info before beginning build\"\"\"\n logger.info(\"\")\n logger.info(\"Diagnostic info\")\n logger.info(SEPARATOR)\n logger.info(\"Platform: %s\" % sys.platform)\n logger.info(\"Python version: %s\" % sys.version)\n logger.info(\"Current directory: %s\" % os.getcwd())\n logger.info(\"Build root: %s\" % BUILD_ROOT)\n logger.info(\"SCons root: %s\" % getSConsDir())\n logger.info(\"OS version : %s\" % (platform.system()+\" \"+platform.release()))\n logger.info(\"Host Name : %s\" % socket.gethostname())\n logger.info(\"Build Command Line: %s\" % \" \".join(sys.argv))\n logger.info(SEPARATOR)\n logger.info(\"Command line options:\")\n logger.info(\"Clean: %s\" % str(options.clean))\n logger.info(\"Clean and build: %s\" % str(options.clean_build))\n logger.info(\"Clean and pack: %s\" % str(options.clean_pack))\n logger.info(\"Use default target: %s\" % str(options.use_default_target))\n logger.info(\"Keep going: %s\" % str(options.keep_going))\n logger.info(\"Static analysis: %s\" % str(options.enable_static_analysis))\n logger.info(\"Mapreport: %s\" % str(options.map_report))\n logger.info(\"Verbose: %s\" % str(options.verbose))\n logger.info(SEPARATOR)\n if len(buildInput[\"files\"]) > 0:\n logger.info(\"Command line files:\")\n logger.info(\" \".join(buildInput[\"files\"]))\n logger.info(SEPARATOR)\n if len(buildInput[\"params\"]) > 0:\n printParams(buildInput[\"params\"], \"command line\", \\\n exclude = [ \"CHIPSET\" ])\n logger.info(SEPARATOR)\n if len(buildInput[\"args\"]) > 0:\n logger.info(\"Additional command line arguments:\")\n logger.info(\" \".join(buildInput[\"args\"]))\n logger.info(SEPARATOR)\n logger.info(\"BRANCH:\\t\\t%s\" % branch)\n targetCount = len(targets)\n logger.info(\"%d %s:\\t%s\" % (targetCount, \\\n \"CHIPSET\" if targetCount == 1 else \"CHIPSETS\", \\\n \", \".join(targets)))\n logger.info(SEPARATOR)\n if branchWarning:\n logger.info(\"\")\n 
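# the branch was guessed by detectBranch() rather than given via -b, so\n # remind the user how to override a wrong guess\n 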
logger.info(\"Auto-detected branch '%s'\" % branch)\n logger.info(\"If this is not correct, please explicitly give -b <branch>\") #pylint: disable=C0301\n if targetCount > 1:\n logger.info(\"\")\n logger.info(\"Multiple chipsets (%d) will be built for %s\" \\\n % (targetCount, branch))\n logger.info(\"\")\n logger.info(\"Preparing to build...\")\n logger.info(\"\")\n sys.stdout.flush()\n\ndef logV(msg):\n \"\"\"verbose module-level logging\"\"\"\n logger.log(VERBOSE, msg)\n\ndef delete_BuildProductFile():\n \"\"\"\" Function used to delete BuildProducts.txt file if build fails\"\"\"\n os.chdir(PACKAGE_ROOT)\n if os.path.isfile('BuildProducts.txt'):\n os.remove(\"BuildProducts.txt\")\n\ndef signal_handler(sig, frame): #pylint: disable=W0613\n \"\"\"Handles <Ctrl> + C\"\"\"\n logger.warn(\"Received <Ctrl> + C - killing all processes\")\n sys.exit(ERROR_INTERRUPTED) #pylint: disable=E0602\n\ndef setStaticAnalysisEnvironment(enable, buildInput):\n \"\"\"Set-up environment for static analysis enabled build\"\"\"\n if enable:\n pass # all SA features are enabled by default\n else:\n del os.environ[\"BUILDSPEC\"] # disable buildspec_builder (klockwork)\n if len(buildInput[\"files\"]) == 0:\n # full build; disable individual app SA checks\n DEFAULT_ARGS.append(\"tzbsp_enable_sa=0\") # disable SA checks\n\ndef setCodeCoverageEnvironment():\n \"\"\"Set-up environment for code coverage enabled build\"\"\"\n cctBinPath = os.environ[\"CCBIN\"]\n os.environ[\"BULLSEYE_PATH\"] = os.environ.get(\"LLVMBIN\")\n os.environ[\"LLVMBIN\"] = cctBinPath\n os.environ[\"COVFILE\"] = os.path.join(BUILD_ROOT, COV_FILE_NAME)\n os.environ[\"COVERR\"] = os.path.join(BUILD_ROOT, COV_LOG_FILE)\n DEFAULT_ARGS.extend([\"tzbsp_cc=1\", \"tzbsp_no_pimem=1\", \"tzbsp_quadmb=1\"])\n enableCodeCovCmd = str(os.path.join(cctBinPath, 'cov01')) + \" -1\"\n call(enableCodeCovCmd)\n\nif os.path.exists(UNIFIED_LOG+\".txt\"):\n renameLog(LOGFILE_ROOT, UNIFIED_LOG)\nhandler = logging.FileHandler(UNIFIED_LOG+\".txt\")\nhandler.setLevel(logging.DEBUG)\nlogger.addHandler(handler)\n\nBuildConfigParser.logger.addHandler(handler)\n\nif cleanpack_tz != None:\n cleanpack_tz.logger.addHandler(handler)\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, signal_handler)\n try:\n EXIT_CODE = buildAll()\n except:\n delete_BuildProductFile()\n raise\n if EXIT_CODE != 0:\n delete_BuildProductFile()\n\n sys.exit(EXIT_CODE)\n\n", "sub_path": "office1/nhlos/trustzone_images/build/ms/build_all.py", "file_name": "build_all.py", "file_ext": "py", "file_size_in_byte": 70640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 171, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 175, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 193, "usage_type": "call"}, {"api_name": "BuildConfigParser.loadXml", "line_number": 196, "usage_type": "call"}, {"api_name": "BuildConfigParser.loadAliasMap", "line_number": 211, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 410, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 417, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 474, "usage_type": "call"}, {"api_name": "time.time", "line_number": 531, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 550, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 550, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 551, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 551, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 552, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 552, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 554, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 554, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 556, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", 
"line_number": 556, "usage_type": "name"}, {"api_name": "os.path.realpath", "line_number": 557, "usage_type": "call"}, {"api_name": "os.path", "line_number": 557, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 563, "usage_type": "call"}, {"api_name": "os.path", "line_number": 563, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom.Document", "line_number": 566, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 566, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 567, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 567, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 567, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 567, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 577, "usage_type": "call"}, {"api_name": "os.path", "line_number": 577, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 580, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 580, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 581, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 581, "usage_type": "name"}, {"api_name": "os.path.realpath", "line_number": 589, "usage_type": "call"}, {"api_name": "os.path", "line_number": 589, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 613, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 613, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 636, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 655, "usage_type": "call"}, {"api_name": "os.sysconf_names.has_key", "line_number": 665, "usage_type": "call"}, {"api_name": "os.sysconf_names", "line_number": 665, "usage_type": "attribute"}, {"api_name": "os.sysconf", "line_number": 667, "usage_type": "call"}, {"api_name": "os.environ.has_key", "line_number": 668, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 668, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 670, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 683, "usage_type": "call"}, {"api_name": "os.path", "line_number": 683, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 684, "usage_type": "call"}, {"api_name": "os.path", "line_number": 684, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 695, "usage_type": "call"}, {"api_name": "os.path", "line_number": 695, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 695, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 717, "usage_type": "call"}, {"api_name": "os.path", "line_number": 717, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 722, "usage_type": "call"}, {"api_name": "os.path", "line_number": 722, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 805, "usage_type": "call"}, {"api_name": "os.path", "line_number": 805, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 823, "usage_type": "call"}, {"api_name": "os.path", "line_number": 823, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 824, "usage_type": "call"}, {"api_name": "os.path", "line_number": 824, "usage_type": "attribute"}, {"api_name": 
"glob.glob", "line_number": 828, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 832, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 832, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 846, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 846, "usage_type": "call"}, {"api_name": "os.path", "line_number": 846, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 852, "usage_type": "call"}, {"api_name": "os.path", "line_number": 852, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 853, "usage_type": "call"}, {"api_name": "os.path", "line_number": 853, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 855, "usage_type": "call"}, {"api_name": "os.path", "line_number": 855, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 861, "usage_type": "call"}, {"api_name": "os.path", "line_number": 861, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 864, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 877, "usage_type": "call"}, {"api_name": "os.path", "line_number": 877, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 885, "usage_type": "call"}, {"api_name": "os.path", "line_number": 885, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 885, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 886, "usage_type": "call"}, {"api_name": "os.path", "line_number": 886, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 889, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 891, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 895, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 895, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 898, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 899, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 910, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 915, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 925, "usage_type": "call"}, {"api_name": "os.path", "line_number": 925, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 930, "usage_type": "call"}, {"api_name": "cleanpack_tz2.cleanpack", "line_number": 972, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 976, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 976, "usage_type": "call"}, {"api_name": "os.path", "line_number": 976, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 981, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 981, "usage_type": "call"}, {"api_name": "os.path", "line_number": 981, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 991, "usage_type": "call"}, {"api_name": "os.path", "line_number": 991, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 992, "usage_type": "call"}, {"api_name": "os.path", "line_number": 992, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 993, "usage_type": "call"}, {"api_name": "os.path", "line_number": 993, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 995, "usage_type": "call"}, {"api_name": "os.path", "line_number": 995, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 996, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 998, 
"usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 1005, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1005, "usage_type": "attribute"}, {"api_name": "shutil.copytree", "line_number": 1006, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 1008, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 1064, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1119, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 1122, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1261, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 1270, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 1271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1271, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1271, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 1273, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1326, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1326, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 1326, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1328, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1328, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1329, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1329, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 1337, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1337, "usage_type": "attribute"}, {"api_name": "optparse.OptionParser", "line_number": 1344, "usage_type": "attribute"}, {"api_name": "optparse.OptionParser._process_args", "line_number": 1351, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 1351, "usage_type": "attribute"}, {"api_name": "optparse.BadOptionError", "line_number": 1352, "usage_type": "attribute"}, {"api_name": "optparse.AmbiguousOptionError", "line_number": 1352, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1517, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1541, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1554, "usage_type": "call"}, {"api_name": "BuildConfigParser.fileTemplate", "line_number": 1573, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 1596, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 1596, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1600, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1600, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 1600, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 1608, "usage_type": "attribute"}, {"api_name": "sys.version", "line_number": 1609, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 1610, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 1613, "usage_type": "call"}, {"api_name": "platform.release", "line_number": 1613, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 1614, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1615, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 1656, "usage_type": "call"}, {"api_name": "sys.stdout", 
"line_number": 1656, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 1664, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 1665, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1665, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 1666, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1671, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 1678, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 1685, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 1686, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 1686, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 1687, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 1688, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1688, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1688, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 1689, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1689, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1689, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1691, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1691, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 1692, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1694, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1694, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 1696, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 1697, "usage_type": "attribute"}, {"api_name": "BuildConfigParser.logger.addHandler", "line_number": 1700, "usage_type": "call"}, {"api_name": "BuildConfigParser.logger", "line_number": 1700, "usage_type": "attribute"}, {"api_name": "cleanpack_tz2.logger.addHandler", "line_number": 1703, "usage_type": "call"}, {"api_name": "cleanpack_tz2.logger", "line_number": 1703, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 1706, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 1706, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1715, "usage_type": "call"}]} +{"seq_id": "182388393", "text": "\"\"\"\nCares about handling and execution warnings.\n\n\"\"\"\nfrom pkg_resources import parse_version\nimport sphinx\nfrom sphinxcontrib.needs.filter_common import filter_needs\n\nsphinx_version = sphinx.__version__\nif parse_version(sphinx_version) >= parse_version(\"1.6\"):\n from sphinx.util import logging\nelse:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef process_warnings(app, exception):\n \"\"\"\n Checks the configured warnings.\n\n This func gets called by the latest sphinx-event, so that really everything is already done.\n\n :param app: application\n :param exception: raised exceptions\n :return:\n \"\"\"\n\n # We cget called also if an exception occured during build\n # In this case the build is already broken and we do not need to check anything.\n if exception is not None:\n return\n\n env = app.env\n # If no needs were defined, we do not need to do anything\n if not hasattr(env, \"needs_all_needs\"):\n return\n\n # Check if warnings already got executed.\n # Needed because the used event gets executed multiple times, but warnings need to be checked only\n # on first execution\n if hasattr(env, \"needs_warnings_executed\") 
and env.needs_warnings_executed is True:\n return\n\n env.needs_warnings_executed = True\n\n needs = env.needs_all_needs\n\n warnings = getattr(app.config, 'needs_warnings', {})\n\n with logging.pending_logging():\n logger.info('\\nChecking sphinx-needs warnings')\n warning_raised = False\n for warning_name, warning_filter in warnings.items():\n result = filter_needs(needs.values(), warning_filter)\n if len(result) == 0:\n logger.info(' {}: passed'.format(warning_name))\n else:\n need_ids = [x['id'] for x in result]\n logger.info(' {}: failed'.format(warning_name))\n logger.info(' \\t\\tfailed needs: {} ({})'.format(len(need_ids), ', '.join(need_ids)))\n logger.info(' \\t\\tused filter: {}'.format(warning_filter))\n warning_raised = True\n\n if warning_raised:\n logger.warning('Sphinx-Needs warnings were raised. See console / log output for details.')\n\n\n\n", "sub_path": "sphinxcontrib/needs/warnings.py", "file_name": "warnings.py", "file_ext": "py", "file_size_in_byte": 2212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sphinx.__version__", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pkg_resources.parse_version", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.pending_logging", "line_number": 51, "usage_type": "call"}, {"api_name": "sphinxcontrib.needs.filter_common.filter_needs", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "27178596", "text": "import logging\n\nimport requests\nfrom django.conf import settings\n\nfrom quota.exceptions import APIIntegrationException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CryptoProviderClient(object):\n\n def __init__(self):\n \"\"\"\n Initialize client.\n\n :param host: hostname to connect to\n :type host: string\n :param verify_ssl: verify SSL certificates for HTTPS requests\n :type verify_ssl: bool\n\n \"\"\"\n self._base_url = 'https://{0}/query'.format(settings.CRYPTO_PROVIDER_HOST)\n self._api_key = settings.CRYPTO_PROVIDER_API_KEY\n\n def get_exchange_rates(self, from_currency, to_currency):\n\n params = {\n \"function\": \"CURRENCY_EXCHANGE_RATE\",\n \"from_currency\": from_currency,\n \"to_currency\": to_currency,\n \"apikey\": self._api_key\n }\n\n response = requests.get(self._base_url, params=params)\n if response.status_code != requests.codes.ok:\n logger.error(\n 'Got response from crypto provider: content: {} , status_code:{}'.format(response.content,\n response.status_code))\n raise APIIntegrationException(\"Integration Error with crypto provider {}\".format(response.content))\n else:\n logger.info(\n 'Got response from provider: content: {} , status_code:{}'.format(response.content,\n response.status_code))\n data = response.json()\n if \"Error Message\" in data:\n raise APIIntegrationException(\"Integration Error with crypto provider {}\".format(response.content))\n\n return data\n", "sub_path": "quota/clients.py", "file_name": "clients.py", "file_ext": "py", "file_size_in_byte": 1783, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.settings.CRYPTO_PROVIDER_HOST", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.settings.CRYPTO_PROVIDER_API_KEY", "line_number": 24, 
"usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 36, "usage_type": "attribute"}, {"api_name": "quota.exceptions.APIIntegrationException", "line_number": 40, "usage_type": "call"}, {"api_name": "quota.exceptions.APIIntegrationException", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "468746179", "text": "import re\nimport os\nimport copy\n\n\nTRAINING_PATH = os.path.join(\"..\", \"dataset\", \"TRAINING\")\n\nclass Node():\n def __init__(self, label = None, nuclearity = None, span= None, text = None, children=None, left_nuclearity=None, right_nuclearity=None):\n self._label = label # rel2par\n if not label:\n if nuclearity:\n import pdb; pdb.set_trace()\n else:\n self._label = \"Root\"\n else:\n try:\n from utils import map_to_cluster\n self._label = map_to_cluster(label)\n except ValueError:\n self._label = label\n self._nuclearity = nuclearity # Nucleus / Satellite\n self._text = text # empty if not a leaf\n self._span = span # list of string integers ([\"1\", \"20\"] or [\"4\"])\n\n self._children = [] # can be more than two, later we'll binarize\n if children:\n self._children = children\n\n self._full_span = [int(s) for s in self._span]\n if len(self._span) == 1:\n self._full_span = [int(self._span[0]), int(self._span[0]) + 1]\n else:\n self._full_span[1] += 1 # To match the format\n self._full_span = [s-1 for s in self._full_span] # to start by 0\n\n # assign after binarization\n self._left = None\n self._right = None\n\n self._left_nuclearity = left_nuclearity\n self._right_nuclearity = right_nuclearity\n\n def is_leaf(self):\n return len(self._span) == 1\n\n def __str__(self):\n \"\"\" output the node as in original out.dis format \"\"\"\n nuc_or_root = \"Root\" if self._nuclearity is None else self._nuclearity\n span_or_leaf = \"leaf\" if self.is_leaf() else \"span\"\n output = \"( {0} ({1} {2})\".format(nuc_or_root, span_or_leaf,\n \" \".join(self._span))\n if nuc_or_root != \"Root\" or True:\n output += \" (rel2par {0})\".format(self._label)\n if span_or_leaf == \"leaf\":\n output += \" (text _!{0}_!) 
)\".format(self._text)\n return output\n\n def leaves(self):\n if self.is_leaf():\n yield self\n else:\n for child in self._children:\n yield from child.leaves()\n\n def oracle_label(self, left, right):\n if self._full_span[0] == left and self._full_span[1] == right:\n return self._label\n else:\n for child in self._children:\n if child._full_span[0] <= left < right <= child._full_span[1]:\n return child.oracle_label(left, right)\n return \"\"\n\n def oracle_nuclearity(self, left, right):\n if self._full_span[0] == left and self._full_span[1] == right:\n if not self._nuclearity:\n return \"Root\"\n return self._nuclearity\n else:\n for child in self._children:\n if child._full_span[0] <= left < right <= child._full_span[1]:\n return child.oracle_nuclearity(left, right)\n return \"\"\n\n def enclosing(self, left, right):\n assert self._full_span[0] <= left < right <= self._full_span[1]\n for child in self._children:\n if child.is_leaf():\n continue\n if child._full_span[0] <= left < right <= child._full_span[1]:\n return child.enclosing(left, right)\n return self\n\n def oracle_splits(self, left, right):\n return [\n child._full_span[0]\n for child in self.enclosing(left, right)._children\n if left < child._full_span[0] < right\n ]\n\n\n\nclass Tree():\n def __init__(self, root_node=None):\n self.root = root_node\n\n def build_tree_from_file(self, lines):\n has_children = False\n line = lines.pop(0).strip()\n\n # only first line. Example: ( Root (span 1 124)\n m = re.match(\"\\( Root \\(span (\\d+) (\\d+)\\)\", line)\n if m:\n t = m.groups()\n node = Node(None, None, [t[0], t[1]]) # create new node for root\n self.root = node\n has_children = True\n\n\n # Example: ( Nucleus (span 1 22) (rel2par span)\n m = re.match(\"\\( (\\S+) \\(span (\\d+) (\\d+)\\) \\(rel2par (\\S+)\\)\", line)\n if m:\n t = m.groups()\n node = Node(t[3], t[0], [t[1], t[2]])\n has_children = True\n\n if has_children:\n while lines != []:\n line = lines[0].strip()\n if line.startswith(\")\"):\n lines.pop(0)\n break\n children_node = self.build_tree_from_file(lines)\n node._children.append(children_node)\n\n return node\n\n # otherwise, it's a leaf.\n # Example: ( Nucleus (leaf 51) (rel2par Same-Unit) (text _!are clearer._!) )\n m = re.match(\"\\( (\\S+) \\(leaf (\\d+)\\) \\(rel2par (\\S+)\\) \\(text \\_\\!(.+)\\_\\!\\) \\)\",\n line)\n if m:\n t = m.groups()\n node = Node(t[2], t[0], [t[1]], t[3])\n # no children - this is a leaf.\n return node\n\n def __eq__(self, other):\n \"\"\"Overrides the default implementation\"\"\"\n return tree_utils.output_tree(self.root) == tree_utils.output_tree(other.root)\n\n\n\nclass tree_utils():\n @staticmethod\n def binarize_tree(node):\n \"\"\" binarize the tree in a case there is a node with more than 2 children. 
\"\"\"\n if len(node._children) == 0:\n return\n\n left, right = None, None\n\n if len(node._children) > 2:\n children = [child for child in node._children]\n node._children = []\n\n while len(children) > 2:\n right = children.pop()\n left = children.pop()\n left_span = left._span[0]\n right_span = right._span[0] if right.is_leaf() else right._span[1]\n # create a new node, that will be a new parent for two of the original children.\n new_node = Node(left._label, left._nuclearity, [left_span, right_span])\n new_node._children = [left, right]\n new_node._left = left\n new_node._right = right\n children.append(new_node)\n\n right = children.pop()\n left = children.pop()\n node._children = [left, right]\n node._left = left\n node._right = right\n\n # 1 or 2 chilren \n else:\n left = node._children[0]\n node._left = left\n\n if len(node._children) > 1:\n right = node._children[1]\n node._right = right\n\n tree_utils.binarize_tree(left)\n if right is not None:\n tree_utils.binarize_tree(right)\n\n @staticmethod\n def output_tree(node, indentation = 0):\n if node == None:\n return \"\"\n to_return = (\" \" * indentation * 2 + str(node))\n if node._children:\n left = node._children[0]\n right = node._children[1]\n if node._left_nuclearity and node._right_nuclearity:\n left._nuclearity = node._left_nuclearity\n right._nuclearity = node._right_nuclearity\n to_return += \"\\n\" + tree_utils.output_tree(left, indentation + 1)\n to_return += \"\\n\" + tree_utils.output_tree(right, indentation + 1)\n if node.is_leaf():\n return to_return + \" )\"\n return to_return + \"\\n\" + \" \" * indentation * 2 + \")\"\n\n @staticmethod\n def convert_all_files_to_trees():\n for _, _, files in os.walk(TRAINING_FOLDER):\n for file in files:\n if file.endswith('.out.dis'):\n with open(os.path.join(TRAINING_FOLDER, file), 'r') as f:\n lines = f.readlines() # last line is empty\n tree = Tree()\n tree.build_tree_from_file(lines)\n @staticmethod\n def test_one_tree():\n # usage: open the file, readlines except the last one, send to build_tree_from_file\n # after creating new Tree object, and then call binarizte_tree on tree.root.\n indices, trees = tree_utils.load_trees_from_path(TRAINING_PATH)\n print (trees[0])\n print (len(indices))\n print (indices[0:20])\n\n with open(os.path.join(TRAINING_PATH, '0606.out.dis'), 'r') as f:\n lines = f.readlines()\n tree0 = Tree()\n tree0.build_tree_from_file(lines)\n tree_utils.binarize_tree(tree0.root)\n\n with open(os.path.join(TRAINING_PATH, '0619.out.dis'), 'r') as f:\n lines = f.readlines()\n tree = Tree()\n tree.build_tree_from_file(lines)\n tree_utils.binarize_tree(tree.root)\n # print (tree_utils.output_tree(tree.root))\n # print tree.root\n # print tree.root._children[0]\n # print tree.root._children[1]\n\n # # ---\n # print (tree == tree)\n # print (tree != tree)\n # print (tree == tree0)\n # print (tree != tree0)\n\n @staticmethod\n def load_trees_from_path(path):\n \"\"\"\n Returns 2 lists.\n The first is a list of all root nodes (of each tree).\n The second is a list of the trees indices (for example, 0605),\n as a list of striings.\n \"\"\"\n trees = []\n trees_indices = []\n for _, _, files in os.walk(path):\n for file in files:\n if file.endswith('.out.dis'):\n trees_indices.append(file.replace('.out.dis', ''))\n with open(os.path.join(path, file), 'r') as f:\n lines = f.readlines()\n tree = Tree()\n # convert from out.dis format, and then binarize\n tree.build_tree_from_file(lines)\n tree_utils.binarize_tree(tree.root)\n trees.append(tree.root)\n return 
trees_indices, trees\n\n#tree_utils.test_one_tree()\n\n", "sub_path": "code/myTree.py", "file_name": "myTree.py", "file_ext": "py", "file_size_in_byte": 10045, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pdb.set_trace", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.map_to_cluster", "line_number": 19, "usage_type": "call"}, {"api_name": "re.match", "line_number": 113, "usage_type": "call"}, {"api_name": "{'pdb': 'pdb', 'map_to_cluster': 'utils.map_to_cluster'}", "line_number": 116, "usage_type": "call"}, {"api_name": "re.match", "line_number": 122, "usage_type": "call"}, {"api_name": "{'pdb': 'pdb', 'map_to_cluster': 'utils.map_to_cluster'}", "line_number": 125, "usage_type": "call"}, {"api_name": "re.match", "line_number": 141, "usage_type": "call"}, {"api_name": "{'pdb': 'pdb', 'map_to_cluster': 'utils.map_to_cluster'}", "line_number": 145, "usage_type": "call"}, {"api_name": "{'pdb': 'pdb', 'map_to_cluster': 'utils.map_to_cluster'}", "line_number": 174, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path", "line_number": 221, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}]} +{"seq_id": "140318567", "text": "import torch\nfrom scipy.integrate import ode\nimport numpy as np\nimport polytope as pc\nfrom typing import Optional, List, Tuple\nimport math\nimport matplotlib.pyplot as plt\n\nclass Waypoint:\n def __init__(self, mode: str, mode_parameters: List[float], time_bound: float, id: int,\n unsafeset_list = None):\n self.mode: str = mode\n self.mode_parameters: List[float] = mode_parameters\n self.time_bound: float = time_bound\n self.id = id\n self.unsafeset_list = unsafeset_list\n\n def is_equal(self, other_waypoint: List[float]):\n return tuple(self.mode_parameters) == tuple(other_waypoint.mode_parameters)\n # self.delta: np.array = (self.original_guard[1, :] - self.original_guard[0, :]) / 2\n # TODO add helper function to check if point is inside guard\n\ndef func1(t, vars, u):\n curr_x = vars[0]\n curr_y = vars[1]\n curr_theta = vars[2]\n vr = u[0]\n delta = u[1]\n\n Lr = 2\n Lf = 2\n beta = np.arctan(Lr/(Lr+Lf)*np.sin(delta)/np.cos(delta))\n dx = vr*np.cos(curr_theta+beta)\n dy = vr*np.sin(curr_theta+beta)\n dtheta = vr/Lr * np.sin(beta)\n return [dx, dy, dtheta]\n\ndef lidarSimulator(state, obstacles, range = 50, resolution = 0.05, scan_number = 360):\n x = state[0]\n y = state[1]\n theta = state[2]\n \n scan_angle_list = np.linspace(0, np.pi*2, scan_number, endpoint = False)\n base_scan_vector_x = np.expand_dims(np.arange(0,range,resolution), axis = 1)\n base_scan_vector_y = np.zeros(base_scan_vector_x.shape)\n # base_scan_vector = np.concatenate((base_scan_vector_x, base_scan_vector_y), axis = 1)\n point_cloud = []\n for scan_angle in 
scan_angle_list:\n # pass\n scan_vector_x = (np.cos(scan_angle + theta) * base_scan_vector_x - np.sin(scan_angle + theta) * base_scan_vector_y) + x\n scan_vector_y = (np.sin(scan_angle + theta) * base_scan_vector_x + np.cos(scan_angle + theta) * base_scan_vector_y) + y\n scan_vector = np.concatenate((scan_vector_x, scan_vector_y), axis = 1)\n idx = scan_vector.shape[0]-1\n for obstacle in obstacles:\n res = obstacle.contains(scan_vector.T)\n if np.any(res):\n val = np.argwhere(res==True)[0][0]\n if val < idx:\n idx = val\n pt_x = (np.cos(-theta)*(scan_vector_x[idx-1]-x) - np.sin(-theta)*(scan_vector_y[idx-1]-y))\n pt_y = (np.sin(-theta)*(scan_vector_x[idx-1]-x) + np.cos(-theta)*(scan_vector_y[idx-1]-y))\n point_cloud.append([pt_x, pt_y])\n return np.array(point_cloud)\n\ndef convertToWorld(state, point_cloud):\n x = state[0]\n y = state[1]\n theta = state[2]\n\n point_x = point_cloud[:,0]\n point_y = point_cloud[:,1]\n\n world_x = (np.cos(theta)*point_x - np.sin(theta)*point_y) + x\n world_y = (np.sin(theta)*point_x + np.cos(theta)*point_y) + y \n world_point_cloud = np.concatenate((world_x, world_y), axis = 1)\n return world_point_cloud\n\ndef checkObstacleFront(data_points, curr_state, waypoint):\n curr_x = curr_state[0]\n curr_y = curr_state[1]\n\n # First box\n [x2, y2] = waypoint.mode_parameters[0:2]\n theta = np.arctan2(y2-curr_y, x2-curr_x)\n x_tmp = 0.5\n y_tmp = 0\n dx = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n transform_vector = np.array([[dx,dy],[dx,dy],[-dx,-dy],[-dx,-dy]])\n center_vector = np.array([[x2,y2],[curr_x,curr_y],[curr_x,curr_y],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res1 = poly.contains(data_points[:,0:2].T)\n res1 = np.any(res1)\n\n # Second box\n [x1, y1] = waypoint.mode_parameters[0:2]\n [x2, y2] = waypoint.mode_parameters[2:4]\n theta = np.arctan2(y2-y1, x2-x1)\n x_tmp = 1\n y_tmp = 0\n dx = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n transform_vector = np.array([[dx,dy],[dx,dy],[-dx,-dy],[-dx,-dy]])\n center_vector = np.array([[x2,y2],[x1,y1],[x1,y1],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res2 = poly.contains(data_points[:,0:2].T)\n res2 = np.any(res2)\n\n # Third box\n [x1, y1] = waypoint.mode_parameters[2:4]\n [x2, y2] = waypoint.mode_parameters[4:6]\n theta = np.arctan2(y2-y1, x2-x1)\n x_tmp = 1\n y_tmp = 0\n dx = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n transform_vector = np.array([[dx,dy],[dx,dy],[-dx,-dy],[-dx,-dy]])\n center_vector = np.array([[x2,y2],[x1,y1],[x1,y1],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res3 = poly.contains(data_points[:,0:2].T)\n res3 = np.any(res3)\n\n return int(res1 or res2 or res3)\n\ndef checkObstacleFrontLeft(data_points, curr_state, waypoint):\n # Second box\n [x1, y1] = waypoint.mode_parameters[0:2]\n [x2, y2] = waypoint.mode_parameters[2:4]\n theta = np.arctan2(y2-y1, x2-x1)\n x_tmp = 4\n y_tmp = 0\n dx1 = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy1 = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n x_tmp = 2\n y_tmp = 0\n dx2 = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy2 = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n transform_vector = 
np.array([[dx1,dy1],[dx1,dy1],[dx2,dy2],[dx2,dy2]])\n center_vector = np.array([[x2,y2],[x1,y1],[x1,y1],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res2_left = poly.contains(data_points[:,0:2].T)\n res2_left = np.any(res2_left)\n\n # Third box \n [x1, y1] = waypoint.mode_parameters[2:4]\n [x2, y2] = waypoint.mode_parameters[4:6]\n theta = np.arctan2(y2-y1, x2-x1)\n x_tmp = 4\n y_tmp = 0\n dx1 = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy1 = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n x_tmp = 2\n y_tmp = 0\n dx2 = x_tmp*np.cos(theta+np.pi/2) - y_tmp*np.sin(theta+np.pi/2)\n dy2 = x_tmp*np.sin(theta+np.pi/2) + y_tmp*np.cos(theta+np.pi/2)\n transform_vector = np.array([[dx1,dy1],[dx1,dy1],[dx2,dy2],[dx2,dy2]])\n center_vector = np.array([[x2,y2],[x1,y1],[x1,y1],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res3_left = poly.contains(data_points[:,0:2].T)\n res3_left = np.any(res3_left)\n\n return int(res2_left or res3_left)\n\ndef checkObstacleFrontRight(data_points, curr_state, waypoint):\n [x1, y1] = waypoint.mode_parameters[0:2]\n [x2, y2] = waypoint.mode_parameters[2:4]\n theta = np.arctan2(y2-y1, x2-x1)\n x_tmp = 4\n y_tmp = 0\n dx1 = x_tmp*np.cos(theta-np.pi/2) - y_tmp*np.sin(theta-np.pi/2)\n dy1 = x_tmp*np.sin(theta-np.pi/2) + y_tmp*np.cos(theta-np.pi/2)\n x_tmp = 2\n y_tmp = 0\n dx2 = x_tmp*np.cos(theta-np.pi/2) - y_tmp*np.sin(theta-np.pi/2)\n dy2 = x_tmp*np.sin(theta-np.pi/2) + y_tmp*np.cos(theta-np.pi/2)\n transform_vector = np.array([[dx1,dy1],[dx1,dy1],[dx2,dy2],[dx2,dy2]])\n center_vector = np.array([[x2,y2],[x1,y1],[x1,y1],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res2_right = poly.contains(data_points[:,0:2].T)\n res2_right = np.any(res2_right)\n\n [x1, y1] = waypoint.mode_parameters[2:4]\n [x2, y2] = waypoint.mode_parameters[4:6]\n theta = np.arctan2(y2-y1, x2-x1)\n x_tmp = 4\n y_tmp = 0\n dx1 = x_tmp*np.cos(theta-np.pi/2) - y_tmp*np.sin(theta-np.pi/2)\n dy1 = x_tmp*np.sin(theta-np.pi/2) + y_tmp*np.cos(theta-np.pi/2)\n x_tmp = 2\n y_tmp = 0\n dx2 = x_tmp*np.cos(theta-np.pi/2) - y_tmp*np.sin(theta-np.pi/2)\n dy2 = x_tmp*np.sin(theta-np.pi/2) + y_tmp*np.cos(theta-np.pi/2)\n transform_vector = np.array([[dx1,dy1],[dx1,dy1],[dx2,dy2],[dx2,dy2]])\n center_vector = np.array([[x2,y2],[x1,y1],[x1,y1],[x2,y2]])\n vertices = center_vector + transform_vector\n poly = pc.qhull(vertices)\n res3_right = poly.contains(data_points[:,0:2].T)\n res3_right = np.any(res3_right)\n\n return int(res2_right or res3_right)\n\ndef runModel(waypoint, time_step, initial_point, time_bound):\n init = initial_point\n trajectory = [init]\n r = ode(func1)\n r.set_initial_value(init)\n t = 0\n target_x = waypoint.mode_parameters[0]\n target_y = waypoint.mode_parameters[1]\n i = 0\n\n # Get lidar reading\n point_cloud = lidarSimulator(trajectory[i], waypoint.unsafeset_list)\n point_cloud = convertToWorld(trajectory[i], point_cloud)\n res_front = checkObstacleFront(point_cloud, trajectory[i], waypoint)\n res_front_left = checkObstacleFrontLeft(point_cloud, trajectory[i], waypoint)\n res_front_right = checkObstacleFrontRight(point_cloud, trajectory[i], waypoint)\n trace = [[t]]\n trace[i].extend(trajectory[i])\n trace[i].extend([res_front, res_front_left, res_front_right])\n\n while t <= time_bound:\n ex = (target_x - trajectory[i][0])*np.cos(trajectory[i][2]) + (target_y - trajectory[i][1])*np.sin(trajectory[i][2])\n ey = -(target_x - 
trajectory[i][0])*np.sin(trajectory[i][2]) + (target_y - trajectory[i][1])*np.cos(trajectory[i][2])\n \n k_s = 0.1\n k_n = 0.1\n v = ex * 3\n delta = ey * 1\n u = [v,delta]\n r.set_f_params(u)\n val = r.integrate(r.t + time_step)\n\n trajectory.append(val.tolist())\n\n t += time_step\n i += 1\n\n # Get lidar reading\n point_cloud = lidarSimulator(trajectory[i], waypoint.unsafeset_list)\n point_cloud = convertToWorld(trajectory[i], point_cloud)\n res_front = checkObstacleFront(point_cloud, trajectory[i], waypoint)\n res_front_left = checkObstacleFrontLeft(point_cloud, trajectory[i], waypoint)\n res_front_right = checkObstacleFrontRight(point_cloud, trajectory[i], waypoint)\n trace.append([t])\n trace[i].extend(trajectory[i])\n trace[i].extend([res_front, res_front_left, res_front_right])\n print([res_front, res_front_left, res_front_right])\n\n return trace\n\ndef TC_Simulate(waypoint, time_step, initial_point):\n res = runModel(waypoint, time_step, initial_point, waypoint.time_bound)\n return res\n\nif __name__ == \"__main__\":\n init_x = 0\n init_y = 0\n init_theta = 0\n vertices = np.array([[10,3],[15,3],[15,4],[10,4]])\n poly1 = pc.qhull(vertices)\n vertices = np.array([[10,-3],[15,-3],[15,-4],[10,-4]])\n poly2 = pc.qhull(vertices)\n waypoint = Waypoint(\"follow_waypoint\", [5,0,10,0,15,0], 10, 0, [poly1, poly2])\n init_point = [init_x, init_y, init_theta]\n res = TC_Simulate(waypoint, 0.01, init_point)\n print(res)\n \n # state = [0,1,np.pi/2]\n # vertices = np.array([[10,-5],[20,-5],[20,5],[10,5]])\n # poly1 = pc.qhull(vertices)\n # vertices = np.array([[0,10],[5,15],[0,20],[-5,15]])\n # poly2 = pc.qhull(vertices)\n\n # point_cloud = lidarSimulator(state,[poly1, poly2])\n # plt.plot(point_cloud[:,0],point_cloud[:,1],'.')\n # plt.show()\n # point_cloud = convertToWorld(state, point_cloud)\n # plt.plot(point_cloud[:,0],point_cloud[:,1],'.')\n # plt.show()\n \n", "sub_path": "2020_11_27/autoVehicleSimulator.py", "file_name": "autoVehicleSimulator.py", "file_ext": "py", "file_size_in_byte": 11164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.arctan", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 56, "usage_type": "call"}, 
{"api_name": "numpy.argwhere", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.cos", 
"line_number": 136, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 155, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 182, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 194, 
"usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 204, "usage_type": "call"}, {"api_name": "scipy.integrate.ode", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 266, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 268, "usage_type": "call"}, {"api_name": "polytope.qhull", "line_number": 269, "usage_type": "call"}]} +{"seq_id": "182500550", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom slackbot.bot import respond_to, listen_to\nimport re\nimport urllib\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nimport random\n\n@respond_to('(.*)〜$')\ndef search_lyric(message, word):\n title = []\n artist = []\n lyric_search_url = \"http://utaten.com/lyric/search?sort=popular_sort%&body=\" + word\n p = urlparse(lyric_search_url)\n query = urllib.parse.quote_plus(p.query, safe='=&')\n url = '{}://{}{}{}{}{}{}{}{}'.format(\n p.scheme, p.netloc, p.path,\n ';' if p.params else '', p.params,\n '?' 
if p.query else '', query,\n        '#' if p.fragment else '', p.fragment)\n    html = urllib.request.urlopen(url).read()\n    soup = BeautifulSoup(html, \"html.parser\")\n    try:\n        title = soup.find(class_=\"searchResult__title\").a.string\n        name = soup.find(class_=\"searchResult__name\").a.string\n    except AttributeError:\n        print(\"あ、この曲は知らないな〜><\")\n    else:\n        title = title.strip()\n        name = name.strip()\n        say = [\n        \"懐かしい曲ですね〜\",\n        \"これ、いい曲ですよね〜!!\",\n        \"これ流行ってましたね!\",\n        \"あー、なんかセンチメンタルな気分。。\",\n        \"この曲すごい好きなんです!!\",\n        \"なんだか切ない気持ちになりますよね\",\n        \"テンション上がってくるううううううう!\",\n        \"なんだかしんみりしますね〜\"\n        ]\n        i = random.randint(0,7)\n        message.send(\"「{0}」の「{1}」っていう曲ですね:dog:\\n\".format(name,title)+ say[i])\n", "sub_path": "plugins/lyric_search.py", "file_name": "lyric_search.py", "file_ext": "py", "file_size_in_byte": 1679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "urllib.parse.urlparse", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.parse.quote_plus", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 22, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 42, "usage_type": "call"}, {"api_name": "slackbot.bot.respond_to", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "341951769", "text": "import requests\r\nimport http\r\nimport urllib\r\nimport json\r\nimport time\r\nimport pyquery as pq\r\nimport schedule\r\nfrom user_info import *\r\n\r\n\r\n# Create a CookieJar instance to store cookies\r\ncj = http.cookiejar.CookieJar()\r\n# Build a cookie handler from urllib.request's HTTPCookieProcessor\r\ncp = urllib.request.HTTPCookieProcessor(cj)\r\n# Create an opener from that cookie handler\r\nopener = urllib.request.build_opener(cp)\r\n# Install the custom opener so later requests mimic a browser\r\nurllib.request.install_opener(opener)\r\n\r\n\r\ndef send_comment(aid, message, write=True) -> str: # ok\r\n    \"\"\"Post a comment under the given video by mimicking a browser request, and record the details locally\\n\r\n    aid: the video's av number, a string of digits\\n\r\n    message: the comment body, a string; may contain escape characters\\n\r\n    write: whether to record the comment info in a local file\"\"\"\r\n    if aid[:2] == 'av':\r\n        aid = aid[2:]\r\n    comment = {\r\n        'oid': aid,\r\n        'type': '1',\r\n        'message': message,\r\n        'plat': '1',\r\n        'jsonp': 'jsonp',\r\n        'csrf': csrf\r\n    }\r\n    # This is the comment-posting endpoint; successfully sending data to it posts the comment\r\n    url = 'https://api.bilibili.com/x/v2/reply/add'\r\n    post_data = urllib.parse.urlencode(comment).encode('utf-8')\r\n\r\n    try:\r\n        request = urllib.request.Request(url, headers=headers, data=post_data)\r\n        # Send the request through the custom opener's open method\r\n        response = opener.open(request)\r\n        # Read the response content as utf-8\r\n        raw_data = response.read().decode('utf-8')\r\n        # Convert it to json data (dict-like)\r\n        raw_data = json.loads(raw_data)\r\n    except urllib.error.URLError as e:\r\n        print(e)\r\n        print('请求失败!')\r\n        exit(1)\r\n    # Extract the rpid (comment id) from the json data\r\n\r\n    rpid = raw_data['data']['rpid']\r\n    if write:\r\n        # Record the time\r\n        local_time = time.localtime()\r\n        time_str = '{}'.format(local_time[0])\r\n        for i in range(5):\r\n            time_str += '-' + str(local_time[i + 1]).zfill(2)\r\n        with open('bili-cmt-record.txt', 'ab') as f:\r\n            # Write the details to the file; other operations (deleting, replying) rely on them\r\n            s = str(rpid)+'\\tav'+aid+'\\t【'+message+'】\\t'+time_str+'\\n'\r\n            # Note: Chinese characters must be written as utf-8, or the file turns into mojibake\r\n            f.write(s.encode('utf-8'))\r\n            print(s)\r\n            print('评论记录成功!')\r\n    print('评论发送成功!')\r\n    return rpid\r\n\r\n\r\ndef del_comment(rpid, aid)->bool: # ok\r\n    \"\"\"Delete a comment by mimicking a browser request, similar to the above\\n\r\n    rpid: the comment id (not the floor number), a string of digits\\n\r\n    aid: the video's av number, a string of digits\"\"\"\r\n    if aid[:2] == 'av':\r\n        aid = aid[2:]\r\n    url = 'https://api.bilibili.com/x/v2/reply/del'\r\n    comment = {\r\n        'oid': aid,\r\n        'type': '1',\r\n        'rpid': rpid,\r\n        'jsonp': 'jsonp',\r\n        'csrf': csrf\r\n    }\r\n    postdata = urllib.parse.urlencode(comment).encode('utf-8')\r\n    # cj = http.cookiejar.CookieJar()\r\n    # opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))\r\n    # urllib.request.install_opener(opener)\r\n\r\n    try:\r\n        request = urllib.request.Request(url, headers=headers, data=postdata)\r\n        response = opener.open(request)\r\n        raw_data = response.read().decode('utf-8')\r\n        raw_data = json.loads(raw_data)\r\n        message = raw_data['message']\r\n        if message == '0':\r\n            print('评论删除成功!')\r\n        else:\r\n            print(message)\r\n    except urllib.error.URLError as e:\r\n        print(e)\r\n        print('评论删除失败!')\r\n        return False\r\n\r\n    return True\r\n\r\n\r\ndef danmaku_report(cid, dmid, referer, reason='10')->bool: # TODO: to be improved, e.g. fetch cid and dmid automatically\r\n    \"\"\"Report a danmaku using the danmaku id, dmid and related info\\n\r\n    cid: the video's danmaku pool id\\n\r\n    dmid: the danmaku id\\n\r\n    referer: the video playback page url\\n\r\n    reason: the reason for the report\"\"\"\r\n    url = 'https://api.bilibili.com/x/dm/report/add'\r\n    header = headers.copy()\r\n    header['Referer'] = referer # 'https://www.bilibili.com/bangumi/play/ep232407' # video page url\r\n    data = {\r\n        'cid': cid, # '49075258', # the video's danmaku pool id\r\n        'dmid': dmid, # '3672991774801924', # the danmaku id\r\n        'reason': reason, # report reason, listed below; the higher the number, the less impact and the slower the handling\r\n        # 1. illegal content     2. porn / vulgarity\r\n        # 3. gambling / scams    4. personal attacks\r\n        # 5. privacy violation   6. spam ads\r\n        # 7. flame baiting       8. spoilers\r\n        # 9. malicious spamming  10. unrelated to the video\r\n        'jsonp': 'jsonp',\r\n        'csrf': csrf\r\n    }\r\n    r = requests.post(url, data=data, headers=header)\r\n    print(r.json()['message'])\r\n    if r.status_code == 200:\r\n        print('弹幕举报成功')\r\n        return True\r\n    else:\r\n        print('弹幕举报失败')\r\n        return False\r\n\r\n\r\ndef ep_2_av(ep, times_=200)->str: # ok\r\n    \"\"\"Given a video's ep number, return the corresponding av number, used for posting comments etc.\\n\r\n    ep: the ep number, the prefix 'ep' followed by digits, e.g. 'ep232536'\"\"\"\r\n    url = 'https://www.bilibili.com/bangumi/play/'+ep\r\n    headers_ = headers.copy()\r\n    headers_['Host'] = 'www.bilibili.com'\r\n    headers_['Referer'] = 'https://t.bilibili.com/pages/nav/index'\r\n    headers_['Upgrade - Insecure - Requests'] = '1'\r\n    times = 1\r\n    \"\"\"On a 404, pause for a while before requesting again\"\"\"\r\n    while True:\r\n        while True:\r\n            response = requests.get(url, headers=headers_)\r\n            if response.status_code == 404:\r\n                print('正在重试...第{}次'.format(times), end='\\r')\r\n                if times == times_:\r\n                    return ''\r\n                times += 1\r\n                time.sleep(.5)\r\n            elif response.status_code == 200:\r\n                break\r\n        html = response.text\r\n        doc = pq.PyQuery(html)\r\n        doc = doc('body')\r\n        doc = doc('#bangumi_header .header-info div')('.info-second')('.info-sec-av')\r\n        # The key is obtaining the video's oid; once we have it we can post comments through the api\r\n        oid = doc.text()[2:] # strip the two-letter prefix here\r\n        if oid == '':\r\n            \"\"\"Retry every 0.5 s; requesting too often may get the ip banned\"\"\"\r\n            print('正在重试...第{}次'.format(times), end='\\r')\r\n            times += 1\r\n            time.sleep(.5)\r\n            continue\r\n        else:\r\n            return oid\r\n\r\n\r\ndef auto_reply(ep, message, times): # ok\r\n    \"\"\"Automatically post a comment under the given video\\n\r\n    ep: the video's av or ep number\\n\"\"\"\r\n    if ep[:2] == 'ep':\r\n        oid = 'av' + ep_2_av(ep, times)\r\n        if oid == 'av':\r\n            return\r\n    elif ep[:2] == 'av':\r\n        oid = ep\r\n    else:\r\n        print(ep, '的格式错误!')\r\n        return\r\n    rpid = send_comment(oid, message)\r\n    print('自动评论发送成功!')\r\n    print(rpid)\r\n    return\r\n\r\n\r\ndef auto_action(rpid, referer, oid, action='1')->bool: # ok\r\n    \"\"\"Automatically like a comment\\n\r\n    rpid: the id of the comment to like\\n\r\n    referer: the url of the comment page\\n\r\n    oid: the video's av number\\n\r\n    action: if action is '2' the like is removed\"\"\"\r\n    url = 'https://api.bilibili.com/x/v2/reply/action'\r\n    headers_ = headers.copy()\r\n    headers_['Host'] = 'api.bilibili.com'\r\n    headers_['Referer'] = referer\r\n    headers_['Upgrade - Insecure - Requests'] = '1'\r\n    if oid[:2] == 'av':\r\n        oid = oid[2:]\r\n    if action == '1':\r\n        behavior = ''\r\n    else:\r\n        behavior = '取消'\r\n    data = {\r\n        'oid': oid, # the oid here does not always match the number in the referer, so pass it in separately\r\n        'type': '1', # fixed\r\n        'rpid': rpid, # the comment id\r\n        'action': action, # like; '0' means removing the like\r\n        'jsonp': 'jsonp',\r\n        'csrf': csrf\r\n    }\r\n    # print(referer)\r\n    postdata = urllib.parse.urlencode(data).encode('utf-8')\r\n\r\n    try:\r\n        request = urllib.request.Request(url, headers=headers, data=postdata)\r\n        # Use the global opener here\r\n        response = opener.open(request)\r\n        raw_data = response.read().decode('utf-8')\r\n        raw_data = json.loads(raw_data)\r\n        message = raw_data['message']\r\n        if message == '0':\r\n            print('评论' + behavior + '点赞成功!')\r\n        else:\r\n            print(message)\r\n    except urllib.error.URLError as e:\r\n        print(e)\r\n        print('评论'+behavior+'点赞失败!')\r\n        return False\r\n\r\n    return True\r\n\r\n\r\ndef get_comment(aid, page=1, write=True)->[str]:\r\n    \"\"\"Fetch the comments of the given video\r\n    aid: the video's av number\r\n    page: which page of comments to fetch\"\"\"\r\n    if aid[:2] == 'av':\r\n        aid = aid[2:]\r\n    url = 'https://api.bilibili.com/x/v2/reply?pn={}&type=1&oid='.format(page)+aid\r\n    req = requests.get(url=url)\r\n    json_ = req.text\r\n    raw_data = json.loads(json_)\r\n    # s = json_.encode('utf-8')\r\n\r\n    # acount = raw_data['data']['page']['acount'] # current total comment count, including nested replies\r\n    replies = []\r\n\r\n    for _ in range(20):\r\n        replies.append(raw_data['data']['replies'][_]['content']['message'])\r\n    if write:\r\n        with open('comment-' + 'av' + aid + '.txt', 'wb') as f:\r\n            for _ in replies:\r\n                f.write((_+'\\n').encode('utf-8'))\r\n    return replies\r\n\r\n\r\ndef get_hots(aid, write=True)->[str]:\r\n    \"\"\"Fetch the video's hot comments\\n\r\n    aid: the video's av number\"\"\"\r\n    if aid[:2] == 'av':\r\n        aid = aid[2:]\r\n    url = 'https://api.bilibili.com/x/v2/reply?pn=1&type=1&oid='+aid\r\n    req = requests.get(url=url)\r\n    json_ = req.text\r\n    raw_data = json.loads(json_)\r\n    # the hot comments follow\r\n    hots = raw_data['data']['hots']\r\n    hots = [_['content']['message'] for _ in hots]\r\n    return hots\r\n\r\n\r\ndef get_cid_av(av)->str:\r\n    \"\"\"Obtain the given video's cid; with the cid we can send and fetch danmaku\\n\r\n    av: the video's av number, including the prefix\\n\r\n    Returns the cid, a string of digits\"\"\"\r\n    url = 'https://www.bilibili.com/video/' + av\r\n    response = requests.get(url)\r\n    s = response.text\r\n    # Parse the cid out of the html with pyquery\r\n    doc = pq.PyQuery(s)\r\n    doc = doc('body')('#app div')('.player-box')('#arc_toolbar_report div')('#playpage_share div')('.share-popup div')\r\n    doc = doc('.share-address')('ul')('li').items()\r\n    for _ in doc:\r\n        s = str(_('#link2'))\r\n        if s == '':\r\n            continue\r\n        s = s.split('&')[3][8:]\r\n        return s\r\n\r\n\r\ndef get_danmaku_av(av)->[str]:\r\n    \"\"\"Fetch the raw danmaku; returns a list of strings\"\"\"\r\n    cid = get_cid_av(av)\r\n    comment_url = 'https://comment.bilibili.com/'+cid+'.xml'\r\n    response = requests.get(comment_url)\r\n    response.encoding = 'utf-8'\r\n    text = response.text.split('><')[9:][:-1]\r\n    text = [_[4:-3]for _ in text]\r\n    return text\r\n\r\n\r\ndef get_num_danmaku(av, write=True, name='danmaku.txt')->int:\r\n    \"\"\"Use the cid to fetch all current danmaku of a video and write them to a file; could be improved, e.g. by recording each danmaku's attributes; ok for now\\n\r\n    cid: the video's cid, a string of digits\r\n    write: whether to write the danmaku to a file\r\n    name: the output file name\r\n    Returns the total number of danmaku; note this is the current count, not the historical total, since the danmaku pool has a size limit\r\n    \"\"\"\r\n    cid = get_cid_av(av) # fetch the cid\r\n    text = get_danmaku_av(cid)\r\n    if write:\r\n        with open(name, 'wb') as f:\r\n            for _ in text:\r\n                \"\"\"Take one danmaku as an example:\r\n            3.21500,1,25,16777215,1534147850,0,8e982cdb,3672991774801924\"', '从抖音来\r\n            time (s), danmaku type, font size, unknown, rnd (unknown), color, unknown, danmaku id\"\"\"\r\n                line = _.split('>')\r\n                # info = line[0].split(',') # danmaku attributes can be parsed from info if needed; commented out for now\r\n                # obtained like this:\r\n                # dmid = info[7][:-1]\r\n                # danmaku = line[1][:-3]+'\\n'\r\n\r\n                f.write((line[0]+'\\t'+line[1]+'\\n').encode('utf-8'))\r\n    return len(text)\r\n\r\n\r\ndef first_floor(ep, message, run_time='22:23', max_times=200):\r\n    \"\"\"Grab the first floor in a given time window more automatically, using the lightweight schedule library for the timed task\\n\r\n    Used to auto-grab the first comment under a video predicted to appear; floor-grabbing breaks the site rules,\\n\r\n    so this is for learning only, do not use it for anything illegal!\r\n    ep: the video's av or ep number\\n\r\n    message: the comment body\\n\r\n    run_time: when to post; retried on failure\\n\r\n    max_times: maximum number of attempts, 200 by default\"\"\"\r\n    second = 0\r\n    # The first time execution reaches this point it only registers a timed task; nothing runs yet\r\n    # By default this schedules auto_reply to run at 22:59 every day\r\n    schedule.every().day.at(run_time).do(auto_reply, ep, message, max_times)\r\n    while True:\r\n        \"\"\"A busy loop checks once per second whether the condition is met, and runs the registered task when it is;\r\n        right after start-up the script just waits here until local time reaches run_time\"\"\"\r\n        print('\\r等待中......已等待{}秒'.format(second), end='')\r\n        schedule.run_pending()\r\n        time.sleep(1)\r\n        second += 1\r\n\r\n", "sub_path": "bili.py", "file_name": "bili.py", "file_ext": "py", "file_size_in_byte": 13338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "http.cookiejar.CookieJar", "line_number": 12, "usage_type": "call"}, {"api_name": "http.cookiejar", "line_number": 12, "usage_type": "attribute"}, {"api_name": "urllib.request.HTTPCookieProcessor", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 14, "usage_type": "attribute"}, {"api_name": "urllib.request.build_opener", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.request.install_opener", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlencode", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 38, "usage_type": "attribute"}, {"api_name": "urllib.request.Request", "line_number": 41, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 41, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.error", "line_number": 48, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 57, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 86, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 86, "usage_type": "attribute"}, {"api_name": "urllib.request.Request", "line_number": 92, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 92, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 95, "usage_type": "call"}, {"api_name": "urllib.error", "line_number": 101, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 130, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 152, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 158, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 162, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 171, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 221, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 221, "usage_type": "attribute"}, {"api_name": "urllib.request.Request", "line_number": 224, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 224, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 
228, "usage_type": "call"}, {"api_name": "urllib.error", "line_number": 234, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 249, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 251, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 272, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 274, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 286, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 289, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 304, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 347, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 352, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 353, "usage_type": "call"}]} +{"seq_id": "478470134", "text": "from .data import Alliance, Attribute, CloakState, DisplayType, TargetType\nfrom .ids.ability_id import *\nfrom .ids.buff_id import *\nfrom .ids.effect_id import *\nfrom .ids.unit_typeid import *\nfrom .ids.upgrade_id import *\nfrom collections import defaultdict\nfrom typing import Dict, Set\n\nmineral_ids: Set[int] = {\n RICHMINERALFIELD.value,\n RICHMINERALFIELD750.value,\n MINERALFIELD.value,\n MINERALFIELD450.value,\n MINERALFIELD750.value,\n LABMINERALFIELD.value,\n LABMINERALFIELD750.value,\n PURIFIERRICHMINERALFIELD.value,\n PURIFIERRICHMINERALFIELD750.value,\n PURIFIERMINERALFIELD.value,\n PURIFIERMINERALFIELD750.value,\n BATTLESTATIONMINERALFIELD.value,\n BATTLESTATIONMINERALFIELD750.value,\n MINERALFIELDOPAQUE.value,\n MINERALFIELDOPAQUE900.value,\n}\ngeyser_ids: Set[int] = {\n VESPENEGEYSER.value,\n SPACEPLATFORMGEYSER.value,\n RICHVESPENEGEYSER.value,\n PROTOSSVESPENEGEYSER.value,\n PURIFIERVESPENEGEYSER.value,\n SHAKURASVESPENEGEYSER.value,\n}\ntransforming: Dict[UnitTypeId, AbilityId] = {\n # terran structures\n BARRACKS: LAND_BARRACKS,\n BARRACKSFLYING: LAND_BARRACKS,\n COMMANDCENTER: LAND_COMMANDCENTER,\n COMMANDCENTERFLYING: LAND_COMMANDCENTER,\n ORBITALCOMMAND: LAND_ORBITALCOMMAND,\n ORBITALCOMMANDFLYING: LAND_ORBITALCOMMAND,\n FACTORY: LAND_FACTORY,\n FACTORYFLYING: LAND_FACTORY,\n STARPORT: LAND_STARPORT,\n STARPORTFLYING: LAND_STARPORT,\n SUPPLYDEPOT: MORPH_SUPPLYDEPOT_RAISE,\n SUPPLYDEPOTLOWERED: MORPH_SUPPLYDEPOT_LOWER,\n # terran units\n HELLION: MORPH_HELLION,\n HELLIONTANK: MORPH_HELLBAT,\n LIBERATOR: MORPH_LIBERATORAAMODE,\n LIBERATORAG: MORPH_LIBERATORAGMODE,\n SIEGETANK: UNSIEGE_UNSIEGE,\n SIEGETANKSIEGED: SIEGEMODE_SIEGEMODE,\n THOR: MORPH_THOREXPLOSIVEMODE,\n THORAP: MORPH_THORHIGHIMPACTMODE,\n VIKINGASSAULT: MORPH_VIKINGASSAULTMODE,\n VIKINGFIGHTER: MORPH_VIKINGFIGHTERMODE,\n WIDOWMINE: BURROWUP,\n WIDOWMINEBURROWED: BURROWDOWN,\n # protoss structures\n GATEWAY: MORPH_GATEWAY,\n WARPGATE: MORPH_WARPGATE,\n # protoss units\n OBSERVER: MORPH_OBSERVERMODE,\n OBSERVERSIEGEMODE: MORPH_SURVEILLANCEMODE,\n WARPPRISM: MORPH_WARPPRISMTRANSPORTMODE,\n WARPPRISMPHASING: MORPH_WARPPRISMPHASINGMODE,\n # zerg structures\n SPINECRAWLER: SPINECRAWLERROOT_SPINECRAWLERROOT,\n SPINECRAWLERUPROOTED: SPINECRAWLERUPROOT_SPINECRAWLERUPROOT,\n SPORECRAWLER: SPORECRAWLERROOT_SPORECRAWLERROOT,\n SPORECRAWLERUPROOTED: SPORECRAWLERUPROOT_SPORECRAWLERUPROOT,\n # zerg units\n BANELING: BURROWUP_BANELING,\n BANELINGBURROWED: BURROWDOWN_BANELING,\n DRONE: BURROWUP_DRONE,\n DRONEBURROWED: BURROWDOWN_DRONE,\n HYDRALISK: BURROWUP_HYDRALISK,\n HYDRALISKBURROWED: BURROWDOWN_HYDRALISK,\n INFESTOR: BURROWUP_INFESTOR,\n INFESTORBURROWED: 
BURROWDOWN_INFESTOR,\n INFESTORTERRAN: BURROWUP_INFESTORTERRAN,\n INFESTORTERRANBURROWED: BURROWDOWN_INFESTORTERRAN,\n LURKERMP: BURROWUP_LURKER,\n LURKERMPBURROWED: BURROWDOWN_LURKER,\n OVERSEER: MORPH_OVERSEERMODE,\n OVERSEERSIEGEMODE: MORPH_OVERSIGHTMODE,\n QUEEN: BURROWUP_QUEEN,\n QUEENBURROWED: BURROWDOWN_QUEEN,\n ROACH: BURROWUP_ROACH,\n ROACHBURROWED: BURROWDOWN_ROACH,\n SWARMHOSTBURROWEDMP: BURROWDOWN_SWARMHOST,\n SWARMHOSTMP: BURROWUP_SWARMHOST,\n ULTRALISK: BURROWUP_ULTRALISK,\n ULTRALISKBURROWED: BURROWDOWN_ULTRALISK,\n ZERGLING: BURROWUP_ZERGLING,\n ZERGLINGBURROWED: BURROWDOWN_ZERGLING,\n}\n# For now only contains units that cost supply, used in bot_ai.do()\nabilityid_to_unittypeid: Dict[AbilityId, UnitTypeId] = {\n # Protoss\n AbilityId.NEXUSTRAIN_PROBE: UnitTypeId.PROBE,\n AbilityId.GATEWAYTRAIN_ZEALOT: UnitTypeId.ZEALOT,\n AbilityId.WARPGATETRAIN_ZEALOT: UnitTypeId.ZEALOT,\n AbilityId.TRAIN_ADEPT: UnitTypeId.ADEPT,\n AbilityId.TRAINWARP_ADEPT: UnitTypeId.ADEPT,\n AbilityId.GATEWAYTRAIN_STALKER: UnitTypeId.STALKER,\n AbilityId.WARPGATETRAIN_STALKER: UnitTypeId.STALKER,\n AbilityId.GATEWAYTRAIN_SENTRY: UnitTypeId.SENTRY,\n AbilityId.WARPGATETRAIN_SENTRY: UnitTypeId.SENTRY,\n AbilityId.GATEWAYTRAIN_DARKTEMPLAR: UnitTypeId.DARKTEMPLAR,\n AbilityId.WARPGATETRAIN_DARKTEMPLAR: UnitTypeId.DARKTEMPLAR,\n AbilityId.GATEWAYTRAIN_HIGHTEMPLAR: UnitTypeId.HIGHTEMPLAR,\n AbilityId.WARPGATETRAIN_HIGHTEMPLAR: UnitTypeId.HIGHTEMPLAR,\n AbilityId.ROBOTICSFACILITYTRAIN_OBSERVER: UnitTypeId.OBSERVER,\n AbilityId.ROBOTICSFACILITYTRAIN_COLOSSUS: UnitTypeId.COLOSSUS,\n AbilityId.ROBOTICSFACILITYTRAIN_IMMORTAL: UnitTypeId.IMMORTAL,\n AbilityId.ROBOTICSFACILITYTRAIN_WARPPRISM: UnitTypeId.WARPPRISM,\n AbilityId.STARGATETRAIN_CARRIER: UnitTypeId.CARRIER,\n AbilityId.STARGATETRAIN_ORACLE: UnitTypeId.ORACLE,\n AbilityId.STARGATETRAIN_PHOENIX: UnitTypeId.PHOENIX,\n AbilityId.STARGATETRAIN_TEMPEST: UnitTypeId.TEMPEST,\n AbilityId.STARGATETRAIN_VOIDRAY: UnitTypeId.VOIDRAY,\n AbilityId.NEXUSTRAINMOTHERSHIP_MOTHERSHIP: UnitTypeId.MOTHERSHIP,\n # Terran\n AbilityId.COMMANDCENTERTRAIN_SCV: UnitTypeId.SCV,\n AbilityId.BARRACKSTRAIN_MARINE: UnitTypeId.MARINE,\n AbilityId.BARRACKSTRAIN_GHOST: UnitTypeId.GHOST,\n AbilityId.BARRACKSTRAIN_MARAUDER: UnitTypeId.MARAUDER,\n AbilityId.BARRACKSTRAIN_REAPER: UnitTypeId.REAPER,\n AbilityId.FACTORYTRAIN_HELLION: UnitTypeId.HELLION,\n AbilityId.FACTORYTRAIN_SIEGETANK: UnitTypeId.SIEGETANK,\n AbilityId.FACTORYTRAIN_THOR: UnitTypeId.THOR,\n AbilityId.FACTORYTRAIN_WIDOWMINE: UnitTypeId.WIDOWMINE,\n AbilityId.TRAIN_HELLBAT: UnitTypeId.HELLIONTANK,\n AbilityId.TRAIN_CYCLONE: UnitTypeId.CYCLONE,\n AbilityId.STARPORTTRAIN_RAVEN: UnitTypeId.RAVEN,\n AbilityId.STARPORTTRAIN_VIKINGFIGHTER: UnitTypeId.VIKINGFIGHTER,\n AbilityId.STARPORTTRAIN_MEDIVAC: UnitTypeId.MEDIVAC,\n AbilityId.STARPORTTRAIN_BATTLECRUISER: UnitTypeId.BATTLECRUISER,\n AbilityId.STARPORTTRAIN_BANSHEE: UnitTypeId.BANSHEE,\n AbilityId.STARPORTTRAIN_LIBERATOR: UnitTypeId.LIBERATOR,\n # Zerg\n AbilityId.LARVATRAIN_DRONE: UnitTypeId.DRONE,\n AbilityId.LARVATRAIN_OVERLORD: UnitTypeId.OVERLORD,\n AbilityId.LARVATRAIN_ZERGLING: UnitTypeId.ZERGLING,\n AbilityId.LARVATRAIN_ROACH: UnitTypeId.ROACH,\n AbilityId.LARVATRAIN_HYDRALISK: UnitTypeId.HYDRALISK,\n AbilityId.LARVATRAIN_MUTALISK: UnitTypeId.MUTALISK,\n AbilityId.LARVATRAIN_CORRUPTOR: UnitTypeId.CORRUPTOR,\n AbilityId.LARVATRAIN_ULTRALISK: UnitTypeId.ULTRALISK,\n AbilityId.LARVATRAIN_INFESTOR: UnitTypeId.INFESTOR,\n AbilityId.LARVATRAIN_VIPER: UnitTypeId.VIPER,\n 
AbilityId.LOCUSTTRAIN_SWARMHOST: UnitTypeId.SWARMHOSTMP,\n AbilityId.TRAINQUEEN_QUEEN: UnitTypeId.QUEEN,\n}\n\nIS_STRUCTURE = Attribute.Structure.value\nIS_LIGHT = Attribute.Light.value\nIS_ARMORED = Attribute.Armored.value\nIS_BIOLOGICAL = Attribute.Biological.value\nIS_MECHANICAL = Attribute.Mechanical.value\nIS_MASSIVE = Attribute.Massive.value\nIS_PSIONIC = Attribute.Psionic.value\nUNIT_BATTLECRUISER = UnitTypeId.BATTLECRUISER\nUNIT_ORACLE = UnitTypeId.ORACLE\nTARGET_GROUND: Set[int] = {TargetType.Ground.value, TargetType.Any.value}\nTARGET_AIR: Set[int] = {TargetType.Air.value, TargetType.Any.value}\nTARGET_BOTH = TARGET_GROUND | TARGET_AIR\nIS_SNAPSHOT = DisplayType.Snapshot.value\nIS_VISIBLE = DisplayType.Visible.value\nIS_MINE = Alliance.Self.value\nIS_ENEMY = Alliance.Enemy.value\nIS_CLOAKED: Set[int] = {CloakState.Cloaked.value, CloakState.CloakedDetected.value, CloakState.CloakedAllied.value}\nIS_REVEALED: Set[int] = CloakState.CloakedDetected.value\nCAN_BE_ATTACKED: Set[int] = {CloakState.NotCloaked.value, CloakState.CloakedDetected.value}\nIS_CARRYING_MINERALS: Set[BuffId] = {BuffId.CARRYMINERALFIELDMINERALS, BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS}\nIS_CARRYING_VESPENE: Set[BuffId] = {\n BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS,\n BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS,\n BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG,\n}\nIS_CARRYING_RESOURCES: Set[BuffId] = IS_CARRYING_MINERALS | IS_CARRYING_VESPENE\nIS_ATTACKING = {\n AbilityId.ATTACK,\n AbilityId.ATTACK_ATTACK,\n AbilityId.ATTACK_ATTACKTOWARDS,\n AbilityId.ATTACK_ATTACKBARRAGE,\n AbilityId.SCAN_MOVE,\n}\nIS_PATROLLING = AbilityId.PATROL_PATROL\nIS_GATHERING = AbilityId.HARVEST_GATHER\nIS_RETURNING = AbilityId.HARVEST_RETURN\nIS_COLLECTING = {IS_GATHERING, IS_RETURNING}\nIS_CONSTRUCTING_SCV: Set[AbilityId] = {\n AbilityId.TERRANBUILD_ARMORY,\n AbilityId.TERRANBUILD_BARRACKS,\n AbilityId.TERRANBUILD_BUNKER,\n AbilityId.TERRANBUILD_COMMANDCENTER,\n AbilityId.TERRANBUILD_ENGINEERINGBAY,\n AbilityId.TERRANBUILD_FACTORY,\n AbilityId.TERRANBUILD_FUSIONCORE,\n AbilityId.TERRANBUILD_GHOSTACADEMY,\n AbilityId.TERRANBUILD_MISSILETURRET,\n AbilityId.TERRANBUILD_REFINERY,\n AbilityId.TERRANBUILD_SENSORTOWER,\n AbilityId.TERRANBUILD_STARPORT,\n AbilityId.TERRANBUILD_SUPPLYDEPOT,\n}\nIS_REPAIRING: Set[AbilityId] = {AbilityId.EFFECT_REPAIR, AbilityId.EFFECT_REPAIR_MULE, AbilityId.EFFECT_REPAIR_SCV}\nIS_DETECTOR: Set[UnitTypeId] = {\n UnitTypeId.OBSERVER,\n UnitTypeId.OBSERVERSIEGEMODE,\n UnitTypeId.RAVEN,\n UnitTypeId.MISSILETURRET,\n UnitTypeId.OVERSEER,\n UnitTypeId.OVERSEERSIEGEMODE,\n UnitTypeId.SPORECRAWLER,\n}\nUNIT_PHOTONCANNON = UnitTypeId.PHOTONCANNON\nUNIT_COLOSSUS = UnitTypeId.COLOSSUS\nFakeEffectRadii: Dict[int, float] = {\n UnitTypeId.KD8CHARGE.value: 2,\n UnitTypeId.PARASITICBOMBDUMMY.value: 3,\n UnitTypeId.FORCEFIELD.value: 1.5,\n}\nFakeEffectID: Dict[int, str] = {\n UnitTypeId.KD8CHARGE.value: \"KD8CHARGE\",\n UnitTypeId.PARASITICBOMBDUMMY.value: \"PARASITICBOMB\",\n UnitTypeId.FORCEFIELD.value: \"FORCEFIELD\",\n}\n\n\ndef return_NOTAUNIT():\n # NOTAUNIT = 0\n return NOTAUNIT\n\n\nTERRAN_TECH_REQUIREMENT: Dict[UnitTypeId, UnitTypeId] = defaultdict(\n return_NOTAUNIT,\n {\n MISSILETURRET: ENGINEERINGBAY,\n SENSORTOWER: ENGINEERINGBAY,\n PLANETARYFORTRESS: ENGINEERINGBAY,\n BARRACKS: SUPPLYDEPOT,\n ORBITALCOMMAND: BARRACKS,\n BUNKER: BARRACKS,\n GHOST: GHOSTACADEMY,\n GHOSTACADEMY: BARRACKS,\n FACTORY: BARRACKS,\n ARMORY: FACTORY,\n HELLIONTANK: ARMORY,\n THOR: ARMORY,\n STARPORT: FACTORY,\n FUSIONCORE: STARPORT,\n 
BATTLECRUISER: FUSIONCORE,\n },\n)\nPROTOSS_TECH_REQUIREMENT: Dict[UnitTypeId, UnitTypeId] = defaultdict(\n return_NOTAUNIT,\n {\n PHOTONCANNON: FORGE,\n CYBERNETICSCORE: GATEWAY,\n SENTRY: CYBERNETICSCORE,\n STALKER: CYBERNETICSCORE,\n ADEPT: CYBERNETICSCORE,\n TWILIGHTCOUNCIL: CYBERNETICSCORE,\n SHIELDBATTERY: CYBERNETICSCORE,\n TEMPLARARCHIVE: TWILIGHTCOUNCIL,\n DARKSHRINE: TWILIGHTCOUNCIL,\n HIGHTEMPLAR: TEMPLARARCHIVE,\n DARKTEMPLAR: DARKSHRINE,\n STARGATE: CYBERNETICSCORE,\n TEMPEST: FLEETBEACON,\n CARRIER: FLEETBEACON,\n MOTHERSHIP: FLEETBEACON,\n ROBOTICSFACILITY: CYBERNETICSCORE,\n ROBOTICSBAY: ROBOTICSFACILITY,\n COLOSSUS: ROBOTICSBAY,\n DISRUPTOR: ROBOTICSBAY,\n },\n)\nZERG_TECH_REQUIREMENT: Dict[UnitTypeId, UnitTypeId] = defaultdict(\n return_NOTAUNIT,\n {\n ZERGLING: SPAWNINGPOOL,\n QUEEN: SPAWNINGPOOL,\n ROACHWARREN: SPAWNINGPOOL,\n BANELINGNEST: SPAWNINGPOOL,\n SPINECRAWLER: SPAWNINGPOOL,\n SPORECRAWLER: SPAWNINGPOOL,\n ROACH: ROACHWARREN,\n BANELING: BANELINGNEST,\n LAIR: SPAWNINGPOOL,\n OVERSEER: LAIR,\n OVERLORDTRANSPORT: LAIR,\n INFESTATIONPIT: LAIR,\n INFESTOR: INFESTATIONPIT,\n SWARMHOSTMP: INFESTATIONPIT,\n HYDRALISKDEN: LAIR,\n HYDRALISK: HYDRALISKDEN,\n LURKERDENMP: HYDRALISKDEN,\n LURKERMP: LURKERDENMP,\n SPIRE: LAIR,\n MUTALISK: SPIRE,\n CORRUPTOR: SPIRE,\n NYDUSNETWORK: LAIR,\n HIVE: INFESTATIONPIT,\n VIPER: HIVE,\n ULTRALISKCAVERN: HIVE,\n GREATERSPIRE: HIVE,\n BROODLORD: GREATERSPIRE,\n },\n)\n", "sub_path": "sc2/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 11882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.Set", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 102, "usage_type": "name"}, {"api_name": "data.Attribute.Structure", "line_number": 160, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 160, "usage_type": "name"}, {"api_name": "data.Attribute.Light", "line_number": 161, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 161, "usage_type": "name"}, {"api_name": "data.Attribute.Armored", "line_number": 162, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 162, "usage_type": "name"}, {"api_name": "data.Attribute.Biological", "line_number": 163, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 163, "usage_type": "name"}, {"api_name": "data.Attribute.Mechanical", "line_number": 164, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 164, "usage_type": "name"}, {"api_name": "data.Attribute.Massive", "line_number": 165, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 165, "usage_type": "name"}, {"api_name": "data.Attribute.Psionic", "line_number": 166, "usage_type": "attribute"}, {"api_name": "data.Attribute", "line_number": 166, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 169, "usage_type": "name"}, {"api_name": "data.TargetType.Ground", "line_number": 169, "usage_type": "attribute"}, {"api_name": "data.TargetType", "line_number": 169, "usage_type": "name"}, {"api_name": "data.TargetType.Any", "line_number": 169, "usage_type": "attribute"}, {"api_name": "typing.Set", "line_number": 170, "usage_type": "name"}, {"api_name": "data.TargetType.Air", "line_number": 170, "usage_type": "attribute"}, 
{"api_name": "data.TargetType", "line_number": 170, "usage_type": "name"}, {"api_name": "data.TargetType.Any", "line_number": 170, "usage_type": "attribute"}, {"api_name": "data.DisplayType.Snapshot", "line_number": 172, "usage_type": "attribute"}, {"api_name": "data.DisplayType", "line_number": 172, "usage_type": "name"}, {"api_name": "data.DisplayType.Visible", "line_number": 173, "usage_type": "attribute"}, {"api_name": "data.DisplayType", "line_number": 173, "usage_type": "name"}, {"api_name": "data.Alliance.Self", "line_number": 174, "usage_type": "attribute"}, {"api_name": "data.Alliance", "line_number": 174, "usage_type": "name"}, {"api_name": "data.Alliance.Enemy", "line_number": 175, "usage_type": "attribute"}, {"api_name": "data.Alliance", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 176, "usage_type": "name"}, {"api_name": "data.CloakState.Cloaked", "line_number": 176, "usage_type": "attribute"}, {"api_name": "data.CloakState", "line_number": 176, "usage_type": "name"}, {"api_name": "data.CloakState.CloakedDetected", "line_number": 176, "usage_type": "attribute"}, {"api_name": "data.CloakState.CloakedAllied", "line_number": 176, "usage_type": "attribute"}, {"api_name": "typing.Set", "line_number": 177, "usage_type": "name"}, {"api_name": "data.CloakState.CloakedDetected", "line_number": 177, "usage_type": "attribute"}, {"api_name": "data.CloakState", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 178, "usage_type": "name"}, {"api_name": "data.CloakState.NotCloaked", "line_number": 178, "usage_type": "attribute"}, {"api_name": "data.CloakState", "line_number": 178, "usage_type": "name"}, {"api_name": "data.CloakState.CloakedDetected", "line_number": 178, "usage_type": "attribute"}, {"api_name": "typing.Set", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 180, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 185, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 197, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 213, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 224, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 229, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 241, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 241, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 261, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 261, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 285, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 285, "usage_type": "call"}]} +{"seq_id": "516346378", "text": "\"\"\"Code to construct puncture initial data for single black hole.\"\"\"\r\nimport sys\r\nfrom numpy import zeros, size, sqrt, linspace, meshgrid\r\nimport scipy.linalg as la\r\n\r\n\r\nclass EllipticSolver:\r\n \"\"\"Class Elliptic solves Poisson-type elliptic equations of the form:\r\n D^2 sol + fct sol = rhs\r\n where\r\n - D^2 is the flat Laplace operator\r\n - fct and rhs are user-supplied functions of the coordinates x, y, z,\r\n - and sol is the solution.\r\n\r\n To use this class:\r\n - initialize the class, providing Cartesian coordinates x, y, and z\r\n - call setup_matrix(fct) to set up the operator\r\n - call setup_rhs(rhs) to set up the right-hand side\r\n - then a call to 
solve() returns the solution sol\r\n \"\"\"\r\n\r\n def __init__(self, x, y, z):\r\n \"\"\"Constructor - provide Cartesian coordinates, all of length n_grid,\r\n as arguments.\r\n \"\"\"\r\n\r\n print(\" Setting up Poisson solver...\")\r\n self.n_grid = size(x)\r\n self.delta = x[1] - x[0]\r\n\r\n # set up storage for matrix, solution, r.h.s.\r\n # Note: \"sol\" and \"rhs\" will store functions in 3d format, while\r\n # \"sol_1d\" and \"rhs_1d\" will store functions in 1d format using\r\n # super-index\r\n nnn = self.n_grid ** 3\r\n self.rhs_1d = zeros(nnn)\r\n self.A = zeros((nnn, nnn))\r\n self.sol = zeros((self.n_grid, self.n_grid, self.n_grid))\r\n self.rad = zeros((self.n_grid, self.n_grid, self.n_grid))\r\n\r\n # compute radius\r\n for i in range(0, self.n_grid):\r\n for j in range(0, self.n_grid):\r\n for k in range(0, self.n_grid):\r\n rad2 = x[i] ** 2 + y[j] ** 2 + z[k] ** 2\r\n self.rad[i, j, k] = sqrt(rad2)\r\n\r\n\r\n def setup_matrix(self, fct):\r\n \"\"\"Set up matrix A.\"\"\"\r\n\r\n n_grid = self.n_grid\r\n\r\n # Use Robin boundary conditions to set up boundaries\r\n i = 0 # lower x-boundary\r\n for j in range(0, n_grid):\r\n for k in range(0, n_grid):\r\n index = self.super_index(i, j, k)\r\n self.A[index, index] = self.rad[i, j, k]\r\n self.A[index, index + 1] = -self.rad[i + 1, j, k]\r\n\r\n i = n_grid - 1 # upper x-boundary\r\n for j in range(0, n_grid):\r\n for k in range(0, n_grid):\r\n index = self.super_index(i, j, k)\r\n self.A[index, index] = self.rad[i, j, k]\r\n self.A[index, index - 1] = -self.rad[i - 1, j, k]\r\n\r\n j = 0 # lower y-boundary\r\n for i in range(1, n_grid - 1):\r\n for k in range(0, n_grid):\r\n index = self.super_index(i, j, k)\r\n self.A[index, index] = self.rad[i, j, k]\r\n self.A[index, index + n_grid] = -self.rad[i, j + 1, k]\r\n\r\n j = n_grid - 1 # upper y-boundary\r\n for i in range(1, n_grid - 1):\r\n for k in range(0, n_grid):\r\n index = self.super_index(i, j, k)\r\n self.A[index, index] = self.rad[i, j, k]\r\n self.A[index, index - n_grid] = -self.rad[i, j - 1, k]\r\n\r\n k = 0 # lower z-boundary\r\n for i in range(1, n_grid - 1):\r\n for j in range(1, n_grid - 1):\r\n index = self.super_index(i, j, k)\r\n self.A[index, index] = self.rad[i, j, k]\r\n self.A[index, index + n_grid * n_grid] = -self.rad[i, j, k + 1]\r\n\r\n k = n_grid - 1 # upper z-boundary\r\n for i in range(1, n_grid - 1):\r\n for j in range(1, n_grid - 1):\r\n index = self.super_index(i, j, k)\r\n self.A[index, index] = self.rad[i, j, k]\r\n self.A[index, index - n_grid * n_grid] = -self.rad[i, j, k - 1]\r\n\r\n # fill matrix in interior\r\n for i in range(1, n_grid - 1):\r\n for j in range(1, n_grid - 1):\r\n for k in range(1, n_grid - 1):\r\n index = self.super_index(i, j, k)\r\n\r\n # diagonal element\r\n self.A[index, index] = -6. 
+ self.delta ** 2 * fct[i, j, k]\r\n\r\n # off-diagonal elements\r\n self.A[index, index - 1] = 1.0\r\n self.A[index, index + 1] = 1.0\r\n self.A[index, index - n_grid] = 1.0\r\n self.A[index, index + n_grid] = 1.0\r\n self.A[index, index - n_grid * n_grid] = 1.0\r\n self.A[index, index + n_grid * n_grid] = 1.0\r\n\r\n\r\n def setup_rhs(self, rhs):\r\n \"\"\"Setup right-hand side of matrix equation\"\"\"\r\n\r\n n_grid = self.n_grid\r\n for i in range(1, n_grid - 1):\r\n for j in range(1, n_grid - 1):\r\n for k in range(1, n_grid - 1):\r\n index = self.super_index(i, j, k)\r\n self.rhs_1d[index] = self.delta ** 2 * rhs[i, j, k]\r\n\r\n\r\n def solve(self):\r\n \"\"\"Interface to scipy.linalg matrix solver,\r\n returns sol (in 3d format).\"\"\"\r\n\r\n # solve matrix using scipy.linalg interface...\r\n sol_1d = la.solve(self.A, self.rhs_1d)\r\n\r\n # ... then translate from superindex to 3d\r\n for i in range(0, self.n_grid):\r\n for j in range(0, self.n_grid):\r\n for k in range(0, self.n_grid):\r\n index = self.super_index(i, j, k)\r\n self.sol[i, j, k] = sol_1d[index]\r\n\r\n return self.sol\r\n\r\n\r\n def super_index(self, i, j, k):\r\n \"\"\"Compute super index I:=i+Nj+N^2k.\"\"\"\r\n return i + self.n_grid * (j + self.n_grid * k)\r\n\r\n\r\n\r\nclass Puncture:\r\n \"\"\"Class that handles construction of puncture data.\r\n\r\n To use this class,\r\n - initialize class with physical parameters as arguments\r\n - then call construct_solution.\r\n \"\"\"\r\n\r\n def __init__(self, bh_loc, lin_mom, n_grid, x_out):\r\n \"\"\"Arguments to constructor specify physical parameters:\r\n - location of puncture (bh_loc)\r\n - linear momentum (lin_mom)\r\n - size of grid (n_grid)\r\n - outer boundary (x_out).\r\n \"\"\"\r\n self.bh_loc = bh_loc\r\n self.lin_mom = lin_mom\r\n # echo out parameters\r\n print(\" Constructing class Puncture for single black hole\")\r\n print(\" at bh_loc = (\", bh_loc[0], \",\", bh_loc[1], \",\",\r\n bh_loc[2], \")\") \r\n print(\" with momentum p = (\", lin_mom[0], \",\",\r\n lin_mom[1], \",\", lin_mom[2], \")\") \r\n print(\" Using\", n_grid,\"\\b^3 gridpoints with outer boundary at\", x_out)\r\n # set up grid\r\n self.n_grid = n_grid\r\n self.x_out = x_out\r\n self.delta = 2.0 * x_out / n_grid\r\n\r\n # set up coordinates: use cell-centered grid covering (-x_out, x_out)\r\n half_delta = self.delta / 2.0\r\n self.x = linspace(half_delta - x_out, x_out -\r\n half_delta, n_grid)\r\n self.y = linspace(half_delta - x_out, x_out -\r\n half_delta, n_grid)\r\n self.z = linspace(half_delta - x_out, x_out -\r\n half_delta, n_grid)\r\n\r\n # allocate elliptic solver\r\n self.solver = EllipticSolver(self.x, self.y, self.z)\r\n\r\n # allocate memory for functions u, alpha, beta, and residual\r\n self.alpha = zeros((n_grid, n_grid, n_grid))\r\n self.beta = zeros((n_grid, n_grid, n_grid))\r\n self.u = zeros((n_grid, n_grid, n_grid))\r\n self.res = zeros((n_grid, n_grid, n_grid))\r\n\r\n\r\n def construct_solution(self, tol, it_max):\r\n \"\"\"Construct solution iteratively, provide tolerance and maximum\r\n number of iterations as arguments.\"\"\"\r\n\r\n self.setup_alpha_beta()\r\n residual_norm = self.residual()\r\n print(\" Initial Residual = \", residual_norm)\r\n print(\" Using up to\", it_max, \"iteration steps to reach tolerance of\",\r\n tol)\r\n\r\n # now iterate...\r\n it_step = 0\r\n while residual_norm > tol and it_step < it_max:\r\n it_step += 1\r\n self.update_u()\r\n residual_norm = self.residual()\r\n print(\" Residual after\", it_step, \"iterations :\", 
residual_norm)\r\n if (residual_norm < tol):\r\n print(\" Done!\")\r\n else:\r\n print(\" Giving up...\")\r\n\r\n \r\n def update_u(self):\r\n \"\"\"Function that updates u using Poisson solver;\r\n takes one iteration step.\r\n \"\"\"\r\n\r\n # set up linear term and right-hand side for SolvePoisson...\r\n n_grid = self.n_grid\r\n fct = zeros((n_grid, n_grid, n_grid))\r\n rhs = zeros((n_grid, n_grid, n_grid))\r\n\r\n for i in range(1, n_grid - 1):\r\n for j in range(1, n_grid - 1):\r\n for k in range(1, n_grid - 1):\r\n # compute h'\r\n temp = self.alpha[i, j, k] * (1.0 + self.u[i, j, k]) + 1.0\r\n fct[i, j, k] = (-7.0 * self.beta[i, j, k] *\r\n self.alpha[i, j, k] / temp ** 8)\r\n rhs[i, j, k] = -self.res[i, j, k]\r\n\r\n # now update Poisson solver\r\n self.solver.setup_matrix(fct)\r\n\r\n # set up right-hand side\r\n self.solver.setup_rhs(rhs)\r\n\r\n # solve to find delta_u\r\n delta_u = self.solver.solve()\r\n\r\n # update u\r\n self.u += delta_u\r\n\r\n\r\n def residual(self):\r\n \"\"\"Evaluate residual.\"\"\"\r\n\r\n residual_norm = 0.0\r\n for i in range(1, self.n_grid - 1):\r\n for j in range(1, self.n_grid - 1):\r\n for k in range(1, self.n_grid - 1):\r\n\r\n # compute left-hand side: Laplace operator\r\n ddx = (self.u[i + 1, j, k] - 2.0 * self.u[i, j, k] +\r\n self.u[i - 1, j, k])\r\n ddy = (self.u[i, j + 1, k] - 2.0 * self.u[i, j, k] +\r\n self.u[i, j - 1, k])\r\n ddz = (self.u[i, j, k + 1] - 2.0 * self.u[i, j, k] +\r\n self.u[i, j, k - 1])\r\n lhs = (ddx + ddy + ddz) / self.delta ** 2\r\n\r\n # compute right-hand side,\r\n # recall h = - beta/(alpha + alpha u + 1)^7\r\n temp = self.alpha[i, j, k] * (1.0 + self.u[i, j, k]) + 1.0\r\n rhs = -self.beta[i, j, k] / temp ** 7\r\n\r\n # then compute difference to get residual\r\n self.res[i, j, k] = lhs - rhs\r\n residual_norm += self.res[i, j, k] ** 2\r\n\r\n residual_norm = sqrt(residual_norm) * self.delta ** 3\r\n return residual_norm\r\n\r\n\r\n def setup_alpha_beta(self):\r\n \"\"\"Set up functions alpha and beta.\"\"\"\r\n\r\n n_grid = self.n_grid\r\n p_x = self.lin_mom[0]\r\n p_y = self.lin_mom[1]\r\n p_z = self.lin_mom[2]\r\n\r\n for i in range(0, n_grid):\r\n for j in range(0, n_grid):\r\n for k in range(0, n_grid):\r\n s_x = self.x[i] - self.bh_loc[0]\r\n s_y = self.y[j] - self.bh_loc[1]\r\n s_z = self.z[k] - self.bh_loc[2]\r\n s2 = s_x ** 2 + s_y ** 2 + s_z ** 2\r\n s_bh = sqrt(s2)\r\n l_x = s_x / s_bh\r\n l_y = s_y / s_bh\r\n l_z = s_z / s_bh\r\n lP = l_x * p_x + l_y * p_y + l_z * p_z\r\n\r\n # construct extrinsic curvature\r\n fac = 3.0 / (2.0 * s2)\r\n A_xx = fac * (2.0 * p_x * l_x - (1.0 - l_x * l_x) * lP)\r\n A_yy = fac * (2.0 * p_y * l_y - (1.0 - l_y * l_y) * lP)\r\n A_zz = fac * (2.0 * p_z * l_z - (1.0 - l_z * l_z) * lP)\r\n A_xy = fac * (p_x * l_y + p_y * l_x + l_x * l_y * lP)\r\n A_xz = fac * (p_x * l_z + p_z * l_x + l_x * l_z * lP)\r\n A_yz = fac * (p_y * l_z + p_z * l_y + l_y * l_z * lP)\r\n\r\n # compute A_{ij} A^{ij}\r\n A2 = (\r\n A_xx ** 2 + A_yy ** 2 + A_zz ** 2 +\r\n 2.0*(A_xy ** 2 + A_xz ** 2 + A_yz ** 2)\r\n )\r\n\r\n # now compute alpha and beta from\r\n self.alpha[i, j, k] = 2.0 * s_bh\r\n self.beta[i, j, k] = self.alpha[i, j, k] ** 7 * A2 / 8.0\r\n\r\n\r\n#\r\n#=====================================================================\r\n# Main routine: defines parameters, sets up puncture solver, and\r\n# then finds solution\r\n#=====================================================================\r\n#\r\nif __name__ == '__main__':\r\n #\r\n # set default values for variables\r\n #\r\n # location of 
black hole:\r\n loc_x = 0.0\r\n loc_y = 1.0\r\n loc_z = 0.0\r\n # momentum of black hole:\r\n p_x = 1.0\r\n p_y = 0.0\r\n p_z = 0.0\r\n # number of grid points\r\n n_grid = 16\r\n # location of outer boundary\r\n x_out = 4.0\r\n # tolerance and maximum number of iterations\r\n tol = 1.0e-12\r\n it_max = 50\r\n\r\n # location of puncture\r\n bh_loc = ( loc_x, loc_y, loc_z )\r\n # linear momentum\r\n lin_mom = ( p_x, p_y, p_z )\r\n #\r\n # set up Puncture solver\r\n black_hole = Puncture(bh_loc, lin_mom, n_grid, x_out)\r\n #\r\n # and construct solution\r\n black_hole.construct_solution(tol, it_max)\r\n\r\n # which we can access as\r\n # black_hole.u[i,j,k]\r\n\r\n plot = True\r\n if plot:\r\n import matplotlib.pyplot as plt\r\n\r\n x_M = black_hole.x[0]\r\n plt.imshow(black_hole.u[:, :, n_grid // 2], origin='lower',\r\n extent=[-x_M, x_M, -x_M, x_M])\r\n plt.colorbar()\r\n plt.xlabel('x')\r\n plt.ylabel('y')\r\n plt.title('u(x,y,z={z_:.1f})'.format(z_=black_hole.z[n_grid // 2]))\r\n \r\n\r\n", "sub_path": "code/ex_05/example_05/puncture.py", "file_name": "puncture.py", "file_ext": "py", "file_size_in_byte": 13727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.size", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.linalg.solve", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 373, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 373, "usage_type": "name"}]} +{"seq_id": "363224621", "text": "import logging\nfrom soaplib.core.service import DefinitionBase\nfrom 
com.zctt.iaap.paf.contrib.webservice.classmapper import *\nfrom com.zctt.iaap.paf.contrib.webservice import authorization\nfrom amfast import encoder,decoder\nfrom soaplib.core.service import soap\nfrom soaplib.core.model.primitive import *\nfrom soaplib.core.model.clazz import *\nfrom soaplib.core.model.binary import *\nfrom com.zctt.iaap.paf import functions, settings, entry, application\n\nlogger = logging.getLogger(\"com.zctt.iaap.paf.contrib.webservice\")\n\ndecode = decoder.Decoder(class_def_mapper=decode_class_mapper,amf3=True)\nencode = encoder.Encoder(class_def_mapper=class_mapper,amf3=True)\nclass realtraceservice(DefinitionBase):\n\n @soap(String,_returns=Attachment)\n def open_config(self,fileName):\n if not authorization.hasAuthorization(self.environ.im_self.request):\n return None\n returnValue = application.getService('RTCDRService').open_config(fileName,request=self.environ.im_self.request)\n returnValue = encode.encode(returnValue)\n document = Attachment(data=returnValue)\n return document\n\n\n @soap(Attachment,_returns=Attachment)\n def query(self,rqst):\n if not authorization.hasAuthorization(self.environ.im_self.request):\n return None\n _rqstdecodeValue=None\n if rqst:\n _rqstdecodeValue = decode.decode(rqst.data)\n returnValue = application.getService('RTCDRService').query(_rqstdecodeValue,request=self.environ.im_self.request)\n returnValue = encode.encode(returnValue)\n document = Attachment(data=returnValue)\n return document\n\n\n @soap(Attachment,_returns=Attachment)\n def export(self,rqst):\n if not authorization.hasAuthorization(self.environ.im_self.request):\n return None\n _rqstdecodeValue=None\n if rqst:\n _rqstdecodeValue = decode.decode(rqst.data)\n returnValue = application.getService('RTCDRService').export(_rqstdecodeValue,request=self.environ.im_self.request)\n returnValue = encode.encode(returnValue)\n document = Attachment(data=returnValue)\n return document\n\n\n @soap(String,String,Attachment,Attachment,String,_returns=String)\n def schedulerExport(self,appFileName,pageName,selectFields,filters,createtime):\n if not authorization.hasAuthorization(self.environ.im_self.request):\n return None\n _selectFieldsdecodeValue=None\n if selectFields:\n _selectFieldsdecodeValue = decode.decode(selectFields.data)\n _filtersdecodeValue=None\n if filters:\n _filtersdecodeValue = decode.decode(filters.data)\n returnValue = application.getService('RTCDRService').schedulerExport(appFileName,pageName,_selectFieldsdecodeValue,_filtersdecodeValue,createtime)\n return returnValue\n\n @soap(Attachment)\n def stop(self,traceId):\n if not authorization.hasAuthorization(self.environ.im_self.request):\n return None\n _traceIddecodeValue=None\n if traceId:\n _traceIddecodeValue = decode.decode(traceId.data)\n returnValue = application.getService('RTCDRService').stop(_traceIddecodeValue,request=self.environ.im_self.request)\n\n\n @soap(Attachment)\n def clearData(self,rqst):\n if not authorization.hasAuthorization(self.environ.im_self.request):\n return None\n _rqstdecodeValue=None\n if rqst:\n _rqstdecodeValue = decode.decode(rqst.data)\n returnValue = application.getService('RTCDRService').clearData(_rqstdecodeValue)\n\n def on_method_call(self,method_name,py_params,soap_params):\n logger.info('Call method %s' %method_name)\n\n def on_method_exception_object(self, exc):\n logger.info('Call method error:error=%s' %exc.faultstring)\n\nclassName=realtraceservice\nserviceName='realtraceservice'\n", "sub_path": 
"service/com/zctt/iaap/paf/contrib/webservice/webservice/realtraceservice.py", "file_name": "realtraceservice.py", "file_ext": "py", "file_size_in_byte": 3872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "amfast.decoder.Decoder", "line_number": 14, "usage_type": "call"}, {"api_name": "amfast.decoder", "line_number": 14, "usage_type": "name"}, {"api_name": "amfast.encoder.Encoder", "line_number": 15, "usage_type": "call"}, {"api_name": "amfast.encoder", "line_number": 15, "usage_type": "name"}, {"api_name": "soaplib.core.service.DefinitionBase", "line_number": 16, "usage_type": "name"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization.hasAuthorization", "line_number": 20, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization", "line_number": 20, "usage_type": "name"}, {"api_name": "com.zctt.iaap.paf.application.getService", "line_number": 22, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.application", "line_number": 22, "usage_type": "name"}, {"api_name": "soaplib.core.service.soap", "line_number": 18, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization.hasAuthorization", "line_number": 30, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization", "line_number": 30, "usage_type": "name"}, {"api_name": "com.zctt.iaap.paf.application.getService", "line_number": 35, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.application", "line_number": 35, "usage_type": "name"}, {"api_name": "soaplib.core.service.soap", "line_number": 28, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization.hasAuthorization", "line_number": 43, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization", "line_number": 43, "usage_type": "name"}, {"api_name": "com.zctt.iaap.paf.application.getService", "line_number": 48, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.application", "line_number": 48, "usage_type": "name"}, {"api_name": "soaplib.core.service.soap", "line_number": 41, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization.hasAuthorization", "line_number": 56, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization", "line_number": 56, "usage_type": "name"}, {"api_name": "com.zctt.iaap.paf.application.getService", "line_number": 64, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.application", "line_number": 64, "usage_type": "name"}, {"api_name": "soaplib.core.service.soap", "line_number": 54, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization.hasAuthorization", "line_number": 69, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization", "line_number": 69, "usage_type": "name"}, {"api_name": "com.zctt.iaap.paf.application.getService", "line_number": 74, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.application", "line_number": 74, "usage_type": "name"}, {"api_name": "soaplib.core.service.soap", "line_number": 67, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization.hasAuthorization", "line_number": 79, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.contrib.webservice.authorization", "line_number": 79, "usage_type": "name"}, {"api_name": 
"com.zctt.iaap.paf.application.getService", "line_number": 84, "usage_type": "call"}, {"api_name": "com.zctt.iaap.paf.application", "line_number": 84, "usage_type": "name"}, {"api_name": "soaplib.core.service.soap", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "574272543", "text": "import copy\nimport numpy as np\nfrom ..utils import get_matrix_in_format, matrix_creation_function_for_format\nfrom scipy.sparse import issparse, csr_matrix\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\n\nclass MLClassifierBase(BaseEstimator, ClassifierMixin):\n \"\"\"Base class providing API and common functions for all multi-label classifiers.\n\n Parameters\n ----------\n\n classifier : scikit classifier type\n The base classifier that will be used in a class, will be automagically put under self.classifier for future access.\n require_dense : boolean\n Whether the base classifier requires input as dense arrays, False by default\n \"\"\"\n\n def __init__(self):\n super(MLClassifierBase, self).__init__()\n\n self.copyable_attrs = []\n\n def generate_data_subset(self, y, subset, axis):\n \"\"\"This function subsets the array of binary label vectors to include only certain labels. \n\n Parameters\n ----------\n\n y : array-like of array-likes\n An array-like of binary label vectors.\n\n subset: array-like of integers\n array of integers, indices that will be subsetted from array-likes in y\n\n axis: integer 0 for 'rows', 1 for 'labels', \n control variable for whether to return rows or labels as indexed by subset\n\n Returns\n -------\n\n multi-label binary label vector : array-like of array-likes of {0,1}\n array of binary label vectors including label data only for labels from parameter labels\n \"\"\"\n return_data = None\n if axis == 1:\n return_data = y.tocsc()[:, subset]\n elif axis == 0:\n return_data = y.tocsr()[subset, :]\n\n return return_data\n\n def ensure_input_format(self, X, sparse_format='csr', enforce_sparse=False):\n \"\"\"This function ensures that input format follows the density/sparsity requirements of base classifier. \n\n Parameters\n ----------\n\n X : array-like or sparse matrix, shape = [n_samples, n_features]\n An input feature matrix\n\n sparse_format: string\n Requested format of returned scipy.sparse matrix, if sparse is returned\n\n enforce_sparse : bool\n Ignore require_dense and enforce sparsity, useful internally\n\n Returns\n -------\n\n transformed X : array-like or sparse matrix, shape = [n_samples, n_features]\n If require_dense was set to true for input features in the constructor, \n the returned value is an array-like of array-likes. If require_dense is \n set to false, a sparse matrix of format sparse_format is returned, if \n possible - without cloning.\n \"\"\"\n is_sparse = issparse(X)\n\n if is_sparse:\n if self.require_dense[0] and not enforce_sparse:\n return X.toarray()\n else:\n if sparse_format is None:\n return X\n else:\n return get_matrix_in_format(X, sparse_format)\n else:\n if self.require_dense[0] and not enforce_sparse:\n # TODO: perhaps a check_array?\n return X\n else:\n return matrix_creation_function_for_format(sparse_format)(X)\n\n def ensure_output_format(self, y, sparse_format='csr', enforce_sparse=False):\n \"\"\"This function ensures that output format follows the density/sparsity requirements of base classifier. 
\n\n        Parameters\n        ----------\n\n        y : array-like with shape = [n_samples] or [n_samples, n_outputs]; or sparse matrix, shape = [n_samples, n_outputs] \n            An output label matrix\n\n        sparse_format: string\n            Requested format of returned scipy.sparse matrix, if sparse is returned\n\n        enforce_sparse : bool\n            Ignore require_dense and enforce sparsity, useful internally\n\n        Returns\n        -------\n\n        transformed y: array-like with shape = [n_samples] or [n_samples, n_outputs]; or sparse matrix, shape = [n_samples, n_outputs] \n            If require_dense was set to True for output labels in the constructor, \n            the returned value is an array-like of array-likes. If require_dense is \n            set to False, a sparse matrix of format sparse_format is returned, if \n            possible - without cloning.\n        \"\"\"\n        is_sparse = issparse(y)\n\n        if is_sparse:\n            if self.require_dense[1] and not enforce_sparse:\n                if y.shape[1] != 1:\n                    return y.toarray()\n                elif y.shape[1] == 1:\n                    return np.ravel(y.toarray())\n            else:\n                if sparse_format is None:\n                    return y\n                else:\n                    return get_matrix_in_format(y, sparse_format)\n        else:\n            if self.require_dense[1] and not enforce_sparse:\n                # ensuring 1d\n                if len(y[0]) == 1:\n                    return np.ravel(y)\n                else:\n                    return y\n            else:\n                return matrix_creation_function_for_format(sparse_format)(y)\n\n    def fit(self, X, y):\n        \"\"\"Abstract method to implement to fit classifier according to X, y.\n\n        Parameters\n        ----------\n\n        X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples and n_features is the number of features.\n\n        y : array-like, shape = [n_samples, n_labels]\n            Binary label vectors with 1 if label should be applied and 0 if not.\n\n        Returns\n        -------\n        self : object\n            Returns self.\n        \"\"\"\n        raise NotImplementedError(\"MLClassifierBase::fit()\")\n\n    def predict(self, X):\n        \"\"\"Abstract method to implement to perform classification on an array of test vectors X.\n\n        Parameters\n        ----------\n\n        X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples and n_features is the number of features.\n\n        Returns\n        -------\n\n        y : array-like, shape = [n_samples, n_labels]\n            Binary label vectors with 1 if label should be applied and 0 if not. n_labels is the number of labels in the \n            multi-label instance that the classifier was fit to.\n\n        \"\"\"\n        raise NotImplementedError(\"MLClassifierBase::predict()\")\n\n    def get_params(self, deep=True):\n        \"\"\"\n        Introspection of classifier for search models like cross validation and grid\n        search.\n        Parameters\n        ----------\n        deep : boolean\n            If true, all params will also be introspected and appended to the output dict.\n        Returns\n        -------\n        out : dictionary\n            Dictionary of all parameters and their values. 
If deep=True the dictionary\n also holds the parameters of the parameters.\n \"\"\"\n\n out = dict()\n\n for attr in self.copyable_attrs:\n out[attr] = getattr(self, attr)\n\n if hasattr(getattr(self, attr), 'get_params') and deep:\n deep_items = list(getattr(self, attr).get_params().items())\n out.update((attr + '__' + k, val) for k, val in deep_items)\n\n return out\n\n def set_params(self, **parameters):\n \"\"\"\n Set parameters as returned by `get_params`.\n @see https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py#L243\n \"\"\"\n\n if not parameters:\n return self\n\n valid_params = self.get_params(deep=True)\n\n\n parameters_current_level = [x for x in parameters if '__' not in x]\n for parameter in parameters_current_level:\n value = parameters[parameter]\n\n if parameter in valid_params:\n setattr(self, parameter, value)\n else:\n raise ValueError('Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (parameter, self))\n\n\n parameters_below_current_level = [x for x in parameters if '__' in x]\n parameters_grouped_by_current_level = {object : {} for object in valid_params}\n\n for parameter in parameters_below_current_level:\n object_name, sub_param = parameter.split('__', 1)\n\n if object_name not in parameters_grouped_by_current_level:\n raise ValueError('Invalid parameter %s for estimator %s. '\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (object_name, self))\n\n value = parameters[parameter]\n parameters_grouped_by_current_level[object_name][sub_param] = value\n\n valid_params = self.get_params(deep=True)\n\n # parameters_grouped_by_current_level groups valid parameters for subojects\n for object_name, sub_params in parameters_grouped_by_current_level.items():\n if len(sub_params) > 0:\n sub_object = valid_params[object_name]\n sub_object.set_params(**sub_params)\n\n return self\n", "sub_path": "skmultilearn/base/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 9373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.base.BaseEstimator", "line_number": 8, "usage_type": "name"}, {"api_name": "sklearn.base.ClassifierMixin", "line_number": 8, "usage_type": "name"}, {"api_name": "scipy.sparse.issparse", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.get_matrix_in_format", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.matrix_creation_function_for_format", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.sparse.issparse", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 126, "usage_type": "call"}, {"api_name": "utils.get_matrix_in_format", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 136, "usage_type": "call"}, {"api_name": "utils.matrix_creation_function_for_format", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "599510354", "text": "import logging\nfrom datetime import datetime\nimport os\nfrom functools import reduce\n\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_data(url):\n try:\n data = requests.get(url)\n if data.status_code != 200:\n logging.error(f'Ошибка! {url} Код ответа: {data.status_code}')\n exit(1)\n else:\n logging.info(f'{url} Данные успешно получены.')\n except Exception as e:\n logging.exception(f'Ошибка! 
{e}')\n exit(1)\n return data.json()\n\n\ndef stringify(user_id, tasks):\n user_tasks = filter(lambda task: task['userId'] == user_id, tasks)\n\n def helper(acc, task):\n if task['completed']:\n acc[0] += cut(task['title'])\n else:\n acc[1] += cut(task['title'])\n return acc\n\n return reduce(helper, user_tasks, ['', ''])\n\n\ndef cut(string):\n if len(string) > 50:\n string = f'{string[:50]}...'\n return string + '\\n'\n\n\ndef main():\n users = get_data('https://json.medrating.org/users')\n tasks = get_data('https://json.medrating.org/todos')\n\n if not os.path.exists('tasks'):\n os.mkdir('tasks')\n\n for user in users:\n date = datetime.now().strftime('%d.%m.%Y %H:%M')\n [complete, incomplete] = stringify(user['id'], tasks)\n\n text = f'''\n{user[\"name\"]} <{user[\"email\"]}> {date}\n{user[\"company\"][\"name\"]}\n\nЗавершенные задачи:\n{complete.strip()}\n\nОставшиеся задачи:\n{incomplete.strip()}\n'''\n filename = f'tasks/{user[\"username\"]}.txt'\n if os.path.exists(filename):\n mod_date = datetime.fromtimestamp(os.path.getmtime(filename)).strftime(\"%Y-%m-%dT%H:%M\")\n os.renames(filename, f'{filename[:-4]}_{mod_date}.txt')\n with open(f'tasks/{user[\"username\"]}.txt', 'w', encoding='utf8') as f:\n f.write(text.strip())\n\n logging.info('OK. Отчеты успешно сформированы.')\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "reports.py", "file_name": "reports.py", "file_ext": "py", "file_size_in_byte": 2020, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.getmtime", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.renames", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "270710083", "text": "\"\"\"\nThis module handles the Slack implementation of the Google Drive REST API.\nIt relies on an ExtendedSlackClient.\n\nAlso includes auxiliary methods which use contents of files.list in the Drive v3 REST API\n\nDerived from: https://developers.google.com/drive/api/v3/quickstart/python#step_3_set_up_the_sample\n\"\"\"\n\nfrom oauth2client import file, client, tools\nfrom httplib2 import Http\nfrom googleapiclient.discovery import build\nfrom bot import Violet\n\nlink_t = 
\"https://drive.google.com/file/d/{0}/view?usp=sharing\"\n\n\ndef get_link_from_id(f_id: str):\n return link_t.format(f_id)\n\n\ndef get_link_from_name(files, filename):\n for f in files:\n if f['name'].lower() == filename.lower():\n return link_t.format(f[\"id\"])\n return None\n\n\ndef get_contents_from_id(files, f_id: str):\n out = []\n print(\"file:\", f_id)\n for file in files:\n try:\n if f_id in file[\"parents\"]:\n out.append(file)\n except KeyError:\n pass\n return out\n\n\ndef get_from_id(files, f_id: str):\n for file in files:\n if f_id == file[\"id\"]:\n return file\n return None\n\n\ndef get_from_name(files, filename: str):\n out = []\n for file in files:\n try:\n if filename.lower() in file[\"name\"].lower():\n out.append(file)\n except KeyError:\n pass\n return out\n\n\ndef add_drive_integration_commands(violet: Violet):\n \"\"\"\n Launches the Google Drive REST API service and adds relevant\n commands to the submitted client.\n\n :param violet: the client having commands added to its functions\n \"\"\"\n d_cfg = violet.config[\"drive\"]\n print(f\"Launching Google Drive with scope {d_cfg['scope']}...\")\n\n # Get credentials\n store = file.Storage(d_cfg[\"credentials\"])\n creds = store.get()\n if not creds or creds.invalid:\n scope = f\"{d_cfg['scope url']}{d_cfg['scope']}\"\n flow = client.flow_from_clientsecrets(d_cfg[\"client secret\"], scope)\n creds = tools.run_flow(flow, store)\n\n # Launch Google Drive REST API v3 service\n service = build('drive', 'v3', http=creds.authorize(Http()))\n\n # Get files\n frame = f\"files({','.join(violet.config['file_attr'].tags())})\"\n svc_list = None\n newline = \"\\n\"\n\n def refresh_files():\n \"\"\"Retrieve a 'fresh' copy of files.list using the specified frame\n\n \"\"\"\n print(\"Updating files.list...\")\n grab = service.files().list(fields=frame).execute().get('files', [])\n out = []\n if violet.config[\"init\"][\"with trash\"] == \"False\":\n for e in grab:\n if not e[\"trashed\"]:\n out.append(e)\n return out\n\n svc_list = refresh_files()\n\n print(\"Adding Drive commands to ExtendedClient...\")\n\n def id_command(*args):\n \"\"\"Provides a file resource by its id\"\"\"\n file_retrieved = get_from_id(svc_list, args[0])\n if file_retrieved:\n return f\"```{newline.join([g + ': ' + file_retrieved[g] for g in file_retrieved])}```\"\n\n def search_command(*args):\n \"\"\"Produces a list of files which contain the given search phrase\n\n Arbitrarily restricted by L since it's a simple wall of text at the moment\n \"\"\"\n L = 10\n files_retrieved = get_from_name(svc_list, ' '.join(args[0:]))\n print(\"get_from_name done\")\n if files_retrieved:\n return f\"```{newline.join([g['name'] for g in files_retrieved[:L]])}```\"\n\n def in_command(*args):\n \"\"\"Retrieves a folder's contents by its id\"\"\"\n folder_retrieved = get_contents_from_id(svc_list, args[0])\n if folder_retrieved:\n return f\"```{newline.join([g['name'] + ': ' + g['id'] for g in folder_retrieved])}```\"\n\n def list_command(*args):\n \"\"\"Shows all files\n\n It's long. 
Probably best to avoid this until it's paginated, etc\n        \"\"\"\n        del args\n        return f\"```{newline.join([g['name'] + ': ' + g['id'] for g in svc_list])}```\"\n\n    def link_command(*args):\n        \"\"\"Converts a file id into a link to that file\"\"\"\n        return get_link_from_id(args[0])\n\n    def linkn_command(*args):\n        \"\"\"Converts a file by its name into a link to that file\"\"\"\n        # get_link_from_name returns the share link as a string (or None), not a dict,\n        # so return it directly; args[0:] mirrors the argument handling in search_command\n        link = get_link_from_name(svc_list, ' '.join(args[0:]))\n        if link:\n            return link\n\n    violet.add_command(f=id_command, name=\"id\")\n    violet.add_command(f=search_command, name=\"search\")\n    violet.add_command(f=in_command, name=\"in\")\n    violet.add_command(f=list_command, name=\"list\")\n    violet.add_command(f=link_command, name=\"link\")\n    violet.add_command(f=linkn_command, name=\"linkn\")\n", "sub_path": "ikm/gdrive_v3.py", "file_name": "gdrive_v3.py", "file_ext": "py", "file_size_in_byte": 4710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "oauth2client.file", "line_number": 32, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 34, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 35, "usage_type": "argument"}, {"api_name": "oauth2client.file", "line_number": 42, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 43, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 44, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 50, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 52, "usage_type": "name"}, {"api_name": "oauth2client.file", "line_number": 53, "usage_type": "argument"}, {"api_name": "bot.Violet", "line_number": 59, "usage_type": "name"}, {"api_name": "oauth2client.file.Storage", "line_number": 70, "usage_type": "call"}, {"api_name": "oauth2client.file", "line_number": 70, "usage_type": "name"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 74, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 74, "usage_type": "name"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 75, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 75, "usage_type": "name"}, {"api_name": "googleapiclient.discovery.build", "line_number": 78, "usage_type": "call"}, {"api_name": "httplib2.Http", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "451393926", "text": "#!/usr/bin/env python3\r\n\r\n# Import internal modules\r\ntry:\r\n    from sys import argv\r\n    from os import name, system\r\n    from webbrowser import open_new_tab\r\nexcept Exception as error:\r\n    print(error)\r\n    exit()\r\n\r\n# install modules\r\ntry:\r\n    system(\"pip install requests\")\r\n    system(\"pip install colorama\")\r\nexcept Exception as error:\r\n    print(error)\r\n    exit()\r\n\r\n# Import external modules\r\ntry:\r\n    from requests import head\r\n    from colorama import init, Fore\r\nexcept Exception as error:\r\n    print(error)\r\n    exit()\r\n \r\n# Colors\r\ndefault = Fore.RESET\r\nlred = Fore.LIGHTRED_EX\r\nlblue = Fore.LIGHTBLUE_EX\r\nlgreen = Fore.LIGHTGREEN_EX\r\n\r\n# Headers to check\r\nsecurity_headers = [\r\n    \"Strict-Transport-Security\", \"Access-Control-Allow-Origin\",\r\n    \"X-XSS-Protection\", \"X-Frame-Options\", \"X-Content-Type-Options\",\r\n    \"Content-Security-Policy\"\r\n    ]\r\n\r\n# Headers and their configs\r\nvalid_values = {\r\n    \"Strict-Transport-Security\": 
\"max-age=<expire-time>\",\r\n \"Access-Control-Allow-Origin\": \"<origin>\",\r\n \"X-XSS-Protection\": \"1; mode=block\",\r\n \"X-Frame-Options\": f\"DENY\\t{lblue}[OR]->\\t{lred}SAMEORIGIN\",\r\n \"X-Content-Type-Options\": \"nosniff\",\r\n \"Content-Security-Policy\": \"Content-Security-Policy: default-src 'self'\"\r\n }\r\n\r\ncorrent_values = {}\r\n\r\n# Links to fix\r\nlinks = {\"Strict_Transport_Security\":\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security\",\r\n\"Access_Control_Allow_Origin\":\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin\",\r\n\"X_XSS_Protection\":\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection\",\r\n\"X_Frame_Options\":\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options\",\r\n\"X_Content_Type_Options\":\"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options\",\r\n\"Content_Security_Policy\":\"https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\"}\r\n\r\n# Settings\r\nexist_header = []\r\nnot_exist_header = []\r\norigin_header = \"test.com\"\r\nseperator = \"\"\r\nscript = \"\"\r\n\r\n# Clear screen and add colors to windows cmd\r\ndef clear():\r\n global seperator, script\r\n if name == \"posix\":\r\n system(\"clear\")\r\n print(default)\r\n seperator = \"/\"\r\n else:\r\n system(\"cls\")\r\n init()\r\n print(default)\r\n seperator = \"\\\\\"\r\n \r\n script = argv[0].split(seperator)[-1]\r\n\r\n# Help\r\ndef help():\r\n print(f\"{lblue}Security Headers check v1.0 *-* created by mehran-seiflainia *-*\\n\\t{lgreen}[Usage]: {default}{script} # Run the script normally \\\r\n \\n\\t\\t {script} --cors # Run the script with an 'Origin' header in requests. \\\r\n \\n\\t\\t {script} --cors DOMAIN # Run the script with an custom 'Origin' header. \\\r\n \\n\\t{lgreen}[Examples]:\\n\\t\\t{default}{script}\\n\\t\\t{script} --cors\\n\\t\\t{script} --cors attacker.com\")\r\n exit()\r\n\r\n# Get headers from the site.\r\ndef get_headers(url, cors, origin_header):\r\n try:\r\n if cors == True:\r\n response = head(url, headers = {\"Origin\":origin_header}, allow_redirects=True, verify=False)\r\n else:\r\n response = head(url, allow_redirects=True, verify=False)\r\n headers = (response.headers)\r\n return headers.items()\r\n except Exception as error:\r\n print(f\"{lred}[!] 
CONNECTION ERROR:\\n{error}{default}\")\r\n        exit()\r\n\r\n# Check which security headers exist\r\ndef is_exist(headers):\r\n    try:\r\n        for header in headers:\r\n            if header[0] in security_headers:\r\n                exist_header.append(header[0])\r\n                corrent_values.update({f\"{header[0]}\": f\"{header[1]}\"})\r\n        for header in security_headers:\r\n            if header not in exist_header:\r\n                not_exist_header.append(header)\r\n    except Exception as error:\r\n        print(error)\r\n\r\n# Print headers on screen\r\ndef printer(url, headers):\r\n    try:\r\n        print(f\"{lred}TARGET: {default}{url}\\n\\n\")\r\n        print(f\"{lred}-- [Not implemented] --{default}\")\r\n        for header in not_exist_header:\r\n            print(f\"{lred}{header}{default}\")\r\n\r\n        print(\"\\n\")\r\n        print(f\"{lgreen}-- [Implemented] --{default}\")\r\n        for header in exist_header:\r\n            print(f\"{lgreen}{header}{default}\")\r\n            print(f\"\\t{lgreen}[*] {lblue}The current value is\\n\\t\\t{lred}\" + corrent_values[header] + f\"{default}\\n\")\r\n            print(f\"\\t{lgreen}[*] {lblue}The best value is\\n\\t\\t{lred}\" + valid_values[header] + f\"{default}\\n\")\r\n\r\n    except Exception as error:\r\n        print(error)\r\n\r\n# Check script parameters\r\ndef check_params():\r\n    global origin_header\r\n    if len(argv) == 1:\r\n        return False\r\n    elif len(argv) == 2 and argv[1] == \"--cors\":\r\n        return True\r\n    elif len(argv) == 3 and argv[1] == \"--cors\":\r\n        origin_header = argv[2]\r\n        return True\r\n    else:\r\n        help()\r\n\r\n# Show links in browser\r\ndef web_view(starter):\r\n    if starter:\r\n        for link in not_exist_header:\r\n            link = link.replace(\"-\", \"_\")\r\n            open_new_tab(links[link])\r\n    else:\r\n        print(f\"\\n{lblue}Goodbye ;){default}\")\r\n\r\n# Main\r\ndef main():\r\n    try:\r\n        clear()\r\n        cors = check_params()\r\n        if cors == False:\r\n            print(f\"{lblue}[*] It's possible to send an 'Origin' header in your request, check {lred}{script} -h {lblue}[*]{default}\")\r\n        url = input(\"Please enter the URL --> \")\r\n        if \"http\" not in url:\r\n            url = \"https://\" + url\r\n        headers = get_headers(url, cors, origin_header)\r\n        is_exist(headers)\r\n        printer(url, headers)\r\n        web_view = input(f\"\\n{default}[*_*] Would you like to learn how to implement the missing headers? 
(y/N) -> \").lower()\r\n if web_view == \"yes\" or web_view == \"y\":\r\n return True\r\n else:\r\n return False\r\n except Exception as error:\r\n print(error)\r\n\r\n# Run\r\nweb_view(main())\r\n", "sub_path": "SecurityHeadersCheck.py", "file_name": "SecurityHeadersCheck.py", "file_ext": "py", "file_size_in_byte": 6008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.system", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 15, "usage_type": "call"}, {"api_name": "colorama.Fore.RESET", "line_number": 29, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 29, "usage_type": "name"}, {"api_name": "colorama.Fore.LIGHTRED_EX", "line_number": 30, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 30, "usage_type": "name"}, {"api_name": "colorama.Fore.LIGHTBLUE_EX", "line_number": 31, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 31, "usage_type": "name"}, {"api_name": "colorama.Fore.LIGHTGREEN_EX", "line_number": 32, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 32, "usage_type": "name"}, {"api_name": "os.name", "line_number": 71, "usage_type": "name"}, {"api_name": "os.system", "line_number": 72, "usage_type": "call"}, {"api_name": "os.system", "line_number": 76, "usage_type": "call"}, {"api_name": "colorama.init", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 81, "usage_type": "name"}, {"api_name": "requests.head", "line_number": 95, "usage_type": "call"}, {"api_name": "requests.head", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 138, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 140, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 142, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 143, "usage_type": "name"}, {"api_name": "webbrowser.open_new_tab", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "568235297", "text": "from PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\n\nfrom TelemetryLayer.lib.tlsettings import tlSettings as Settings\nfrom TelemetryLayer.lib.tlsettings import tlSettings as Settings\nfrom TelemetryLayer.lib.tlsettings import tlConstants as Constants\nfrom TelemetryLayer.lib.tllogging import tlLogging as Log\nfrom TelemetryLayer.tlbrokers import tlBrokers as Brokers\nfrom TelemetryLayer.tlmqttclient import *\nfrom TelemetryLayer.topicmanagers.agsense.agdevice import agDeviceList, agDevice, agParams, agParam\n\nimport os.path,sys,traceback,json\n\n\n\n\nclass tlTableParam(QObject):\n \"\"\"\n Todo: refactor this - place Table Parameter handling into\n package of dialog widget utils\n \n \"\"\"\n kLabel = 0\n kControl = 1\n\n \"\"\"\n Populates a table widget with a set of widgets (one one per row) defined by deviceType\n Todo: add isDirty method - currently dirty regardless\n \"\"\"\n\n def __init__(self, tbl, row, param, default=None):\n super(tlTableParam, self).__init__()\n tbl.insertRow(row)\n\n self.row = row\n self.tbl = tbl\n self.value = default\n self.control = QWidget()\n\n try:\n self.name = param['name']\n self.title = param['title']\n self.tooltip = param['desc']\n self.type = param['type']\n self.default = default\n \n self.readonly= param['readonly']\n\n except TypeError as e:\n Log.warn('Type error creating paramater widget ' + str(e))\n\n item = 
QtGui.QLabel(self.title, self.tbl)\n item.setStyleSheet(\"padding: 4px\")\n item.setWordWrap(True)\n item.setToolTip(self.tooltip)\n\n self.tbl.setCellWidget(row, self.kLabel, item)\n\n item = QtGui.QTableWidgetItem(0)\n item.setFlags(QtCore.Qt.NoItemFlags)\n tbl.setItem(row, self.kLabel, item)\n\n pass\n\n def _setControl(self, height=None):\n self.tbl.setCellWidget(self.row, self.kControl, self.control)\n self.control.setEnabled(not self.readonly)\n item = QtGui.QTableWidgetItem(0)\n item.setFlags(QtCore.Qt.NoItemFlags)\n self.tbl.setItem(self.row, self.kLabel, item)\n self.tbl.horizontalHeader().setStretchLastSection(True)\n if height is not None:\n self.tbl.setRowHeight(self.row, height)\n\n\n def getName(self):\n try:\n return self.name\n except:\n return None\n\n def getTitle(self):\n return self.title\n\n def getValue(self):\n return self.value\n\n def getType(self):\n return self.type\n\n\n# Create a spin box\n\nclass tlTableParamSpinBox(tlTableParam):\n def __init__(self, tbl, row, param, default=None):\n super(tlTableParamSpinBox, self).__init__(tbl, row, param, default)\n\n try:\n self.min = param['min']\n self.max = param['max']\n self.int = param['interval']\n self.units = param['units']\n try:\n self.step = param['step']\n except:\n self.step = \"1\"\n\n self.control = QtGui.QSpinBox(self.tbl)\n self.control.setMinimum(int(self.min) - 1) # Qt Bug Min is actual > not >=\n self.control.setMaximum(int(self.max))\n self.control.setSingleStep(int(self.step)) # ???\n self.control.setToolTip(self.tooltip)\n self.control.setSuffix('\\x0A' + self.units)\n self.control.setStyleSheet(\"padding: 4px\")\n self.control.valueChanged.connect(self.setValue)\n self._setControl(40)\n self.control.setValue(int(self.default))\n # self.control.valueChanged.connect(self.setDirty)\n\n except Exception as e:\n Log.debug('Error loading parameter widget ' + str(e))\n return\n\n pass\n\n def setValue(self, value):\n self.value = value\n\n\nclass tlTableParamCheckBox(tlTableParam):\n def __init__(self, tbl, row, param, default=None):\n super(tlTableParamCheckBox, self).__init__(tbl, row, param, default)\n\n try:\n self.control = QtGui.QCheckBox(self.tbl)\n self.control.setToolTip(self.tooltip)\n self.control.setStyleSheet(\"padding: 4px\")\n self.control.stateChanged.connect(self.setValue)\n self.control.setTristate(False);\n self._setControl(40)\n self.control.setChecked(self.default == 'On')\n\n\n except Exception as e:\n Log.debug('Error loading parameter widget ' + str(e))\n return\n\n pass\n\n def setValue(self, value):\n self.value = value\n \n def getValue(self):\n if self.control.isChecked():\n return 'On'\n else:\n return 'Off'\n\n\n# Create a Slider\n\nclass tlTableParamSlider(tlTableParam):\n def __init__(self, tbl, row, param, default=None):\n\n super(tlTableParamSlider, self).__init__(tbl, row, param, default)\n try:\n self.min = param['min']\n self.max = param['max']\n self.int = param['interval']\n self.units = param['units']\n try:\n self.step = param['step']\n except:\n self.step = \"1\"\n\n # Only handles Integers currently!\n\n self.control = QtGui.QSlider(QtCore.Qt.Horizontal, self.tbl)\n self.control.setStyleSheet(\"padding: 4px\")\n self.control.setFocusPolicy(QtCore.Qt.ClickFocus)\n # self.control.setTickPosition(QtGui.QSlider.TicksBelow)\n #self.control.setTickInterval(int(float(self.max)/50))\n self.control.setSingleStep(int(self.step))\n self.control.setMinimum(int(self.min))\n self.control.setMaximum(int(self.max))\n self.control.setToolTip(self.tooltip)\n 
self.control.valueChanged.connect(self.setValue)\n self._setControl(50)\n self.control.setValue(int(self.default))\n\n except Exception as e:\n Log.warn('Error creating widget parameter ' + str(e))\n return\n\n def setValue(self, value):\n self.value = value\n item = self.tbl.cellWidget(self.row, self.kLabel)\n item.setText(self.title + ' ' + str(value) + ' ' + self.units)\n pass\n\n\n# Create a Dropdown\n\nclass tlTableParamCombo(tlTableParam):\n def __init__(self, tbl, row, param, default=None):\n\n super(tlTableParamCombo, self).__init__(tbl, row, param, default)\n\n # Only handles Integers currently!\n\n self.control = QtGui.QComboBox(tbl)\n self.control.setToolTip(self.tooltip)\n idx = 0\n defidx = 0\n for option in param['options']:\n self.control.insertItem(idx, option.text, option)\n if hasattr(option,'value') and option['value'] == self.default:\n defidx = idx\n idx += 1\n\n self.control.currentIndexChanged.connect(self.setValue)\n self.control.setToolTip(self.tooltip)\n self._setControl()\n # self.tbl.setRowHeight(row,100)\n self.control.setCurrentIndex(defidx)\n\n def setValue(self, idx):\n self.value = self.control.itemData(idx).get('value')\n pass\n\n\n\"\"\"\nPopulate a Combo Box with widgets to handle server\nbased parameters defined in device types, and return\nvalues to be stored in the device map.\n\n\"\"\"\n\n\nclass agParameterTable(QObject):\n _params = []\n\n def __init__(self, tableWidget, params):\n super(agParameterTable, self).__init__()\n _params = params\n self._params =[]\n\n tblParam = tableWidget\n tblParam.horizontalHeader().setVisible(False)\n tblParam.verticalHeader().setVisible(False)\n\n tblParam.clearContents()\n tblParam.setRowCount(0)\n tblParam.setShowGrid(True)\n tblParam.setColumnCount(2)\n \n if _params is None or len(_params) == 0:\n return\n\n # Create a table of controls preset with existing values if required\n try:\n\n for param in _params:\n if 'value' in param: \n default = param['value']\n else:\n default = param['default']\n \n if param['widget'] == 'slider':\n self._params.append(tlTableParamSlider(tblParam, tblParam.rowCount(), param, default))\n if param['widget'] == 'select':\n self._params.append(tlTableParamCombo(tblParam, tblParam.rowCount(), param, default))\n if param['widget'] == 'spinbox':\n self._params.append(tlTableParamSpinBox(tblParam, tblParam.rowCount(), param, default))\n if param['widget'] == 'checkbox':\n self._params.append(tlTableParamCheckBox(tblParam, tblParam.rowCount(), param, default))\n except KeyError as e:\n Log.warn(\"Error parsing configuration parameters \" + str(e))\n exc_type, exc_value, exc_traceback = sys.exc_info()\n Log.debug(repr(traceback.format_exception(exc_type, exc_value,\n exc_traceback)))\n\n def params(self):\n params = {}\n for param in self._params:\n params[param.getName()] = param.getValue()\n\n return params\n \n def __iter__(self):\n return self.params().iteritems()\n\nclass agConfigView(QObject):\n def __init__(self, tabs, tLayer, feature): # change to broker?\n super(agConfigView, self).__init__()\n self._tabs = tabs\n self._feature = feature\n self._broker = tLayer.getBroker()\n self._topicManager = tLayer.topicManager()\n self._pTable = None\n self._rebuild()\n self._tabs.btnApply.clicked.connect(self._apply)\n self._tabs.btnReload.clicked.connect(self._reload)\n\n def _rebuild(self,mqtt =None, status = True, msg = None):\n if not status:\n Log.progress(\"There was an error reading the device configurations for this broker: \" +str(msg));\n return\n try:\n topic = 
self._broker.topic(self._feature['topic'])\n self._params = topic['params']\n self._pTable = agParameterTable(self._tabs.tblParams, self._params)\n# self._tabs.btnApply.setEnabled(False)\n except Exception as e:\n Log.debug(\"Error loading Configuration tab \" + str(e))\n\n def _reload(self):\n self._topicManager._requestDevices(self._rebuild)\n\n def _updateBroker(self,mqtt, status = True, msg = None):\n Log.debug(\"_updateBroker! \" + str(status))\n if not status: \n Log.warn(msg)\n return\n self._topicManager.setDevices(agDeviceList(msg.payload))\n self._broker.setTopics(self._topicManager.getTopics())\n self._broker.setDirty(True)\n Brokers.instance().update(self._broker)\n Brokers.instance().sync(True)\n Log.debug(\"Broker updated\")\n\n def _applied(self, client, status = True, msg = None):\n if status == False:\n Log.progress(\"Unable to update device settings - restoring\")\n self._rebuild()\n else:\n Log.progress(\"Configuration updated\")\n Log.debug(\"Updating Devices\")\n self._topicManager._requestDevices(self._updateBroker)\n pass\n \n def _apply(self):\n _client = None\n try:\n params = {\"topic\":self._feature['topic']}\n for key,val in self._pTable:\n params[key] = val\n payload = json.dumps(params)\n request = \"agsense/request/set\"\n Log.progress(\"Updating configuration\")\n \n _client = tlMqttSingleShot(self,\n self._broker,\n request,\n [\"agsense/response/set\"],\n payload,\n 0, #qos\n self._applied)\n \n _client.run()\n except Exception as e:\n Log.debug(\"Error setting parameter \" + str(e))\n if _client:\n _client.kill()\n\n\n def show(self):\n pass\n\n def update(self, data):\n pass\n\n def _error(self, mqtt, msg=\"\"):\n Log.progress(msg)\n", "sub_path": "topicmanagers/agsense/agutils.py", "file_name": "agutils.py", "file_ext": "py", "file_size_in_byte": 12364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "TelemetryLayer.lib.tllogging.tlLogging.warn", "line_number": 52, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTableWidgetItem", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTableWidgetItem", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 71, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 71, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpinBox", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 110, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 123, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 123, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QCheckBox", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 137, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 147, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", 
"line_number": 147, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSlider", "line_number": 180, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 180, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 180, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 180, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 182, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 182, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.warn", "line_number": 194, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 194, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QComboBox", "line_number": 213, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 213, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.warn", "line_number": 280, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 280, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 281, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 282, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 282, "usage_type": "name"}, {"api_name": "traceback.format_exception", "line_number": 282, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.progress", "line_number": 309, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 309, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 317, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 317, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 323, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 323, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.warn", "line_number": 325, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 325, "usage_type": "name"}, {"api_name": "TelemetryLayer.topicmanagers.agsense.agdevice.agDeviceList", "line_number": 327, "usage_type": "call"}, {"api_name": "TelemetryLayer.tlbrokers.tlBrokers.instance", "line_number": 330, "usage_type": "call"}, {"api_name": "TelemetryLayer.tlbrokers.tlBrokers", "line_number": 330, "usage_type": "name"}, {"api_name": "TelemetryLayer.tlbrokers.tlBrokers.instance", "line_number": 331, "usage_type": "call"}, {"api_name": "TelemetryLayer.tlbrokers.tlBrokers", "line_number": 331, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 332, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 332, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.progress", "line_number": 336, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 336, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.progress", "line_number": 339, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 339, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 340, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 340, "usage_type": "name"}, 
{"api_name": "json.dumps", "line_number": 350, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.progress", "line_number": 352, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 352, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.debug", "line_number": 364, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 364, "usage_type": "name"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging.progress", "line_number": 376, "usage_type": "call"}, {"api_name": "TelemetryLayer.lib.tllogging.tlLogging", "line_number": 376, "usage_type": "name"}]} +{"seq_id": "472290288", "text": "from collections import Counter\nfriends = graph.get_object('me', fields=['friends.limit(1000)'])['friends']['data']\nstat = Counter()\nfor f in friends:\n key = \"likes\"\n obj = graph.get_object(f['id'], fields=json.dumps(['id','name', key]))\n if key in obj:\n print(f['id'], f['name'])\n for item in obj[key]['data']:\n print(\"[\"+item['name']+\"]\", end=\" \") \n stat[item['name']] += 1\n print()\npprint(stat.most_common(15))\n", "sub_path": "Week09/q_likes.py", "file_name": "q_likes.py", "file_ext": "py", "file_size_in_byte": 467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.Counter", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "47482717", "text": "\"\"\"Add table WorkflowOverforingspakke\n\nRevision ID: 4ce1131b0075\nRevises: 9b4f6d9d1f7c\nCreate Date: 2021-06-01 16:20:31.240409\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\nfrom sqlalchemy.dialects import postgresql\n\n\n# revision identifiers, used by Alembic.\nrevision = '4ce1131b0075'\ndown_revision = '9b4f6d9d1f7c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table('workflow_overforingspakke',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('overforingspakke_id', sa.Integer(), nullable=False),\n sa.Column('workflow_navn', sa.String(), nullable=False),\n sa.Column('workflow_uid', postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column('opprettet', sa.DateTime(), server_default=sa.text('now()'), nullable=False),\n sa.ForeignKeyConstraint(['overforingspakke_id'], ['overforingspakke.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id'),\n )\n\n\ndef downgrade():\n op.drop_table('workflow_overforingspakke')\n", "sub_path": "mottak-arkiv-service/alembic/versions/4ce1131b0075_add_table_workflowoverforingspakke.py", "file_name": "4ce1131b0075_add_table_workflowoverforingspakke.py", "file_ext": "py", "file_size_in_byte": 1160, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": 
"sqlalchemy.dialects.postgresql.UUID", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "479579089", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nstartTime = datetime.now()\ndef jobdesc(givenUrl):\n details=\"\"\n nexturl = \"https://internshala.com\"+givenUrl\n nextpage = requests.get(nexturl)\n soupnext = BeautifulSoup(nextpage.content, \"html.parser\")\n for p in soupnext.find_all(lambda tag: tag.name == 'div' and \n tag.get('class') == ['text-container']):\n details= details+\" \"+p.text\n return details\n \n\nURL = \"https://internshala.com/fresher-jobs/job\"\npage = requests.get(URL)\nsoup = BeautifulSoup(page.content, \"html.parser\")\njobs = []\njob_title =[]\njobdetails =[]\n\nfor div in soup.find_all(name=\"div\",attrs={\"class\":\"heading_4_5 profile\"}):\n job_title.append(div.text)\n\nfor div0 in soup.find_all(name=\"div\",attrs={\"class\":\"heading_6 company_name\"}):\n for a in div0.find_all(name=\"a\" , attrs={\"class\":\"link_display_like_text\"}):\n jobs.append(a.text)\nfor a in soup.find_all(name=\"a\" , attrs={\"class\":\"view_detail_button\"}):\n # print(a['href']+\" end\")\n jobdetails.append(jobdesc(a['href'])) \nimport pandas as pd\ndata_frame= pd.DataFrame({'Job title':job_title,'Company Name':jobs,'Job Details': jobdetails})\n\nprint(datetime.now()-startTime)\n", "sub_path": "internshala_bs4.py", "file_name": "internshala_bs4.py", "file_ext": "py", "file_size_in_byte": 1263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.now", "line_number": 4, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 4, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "496979908", "text": "import xml.etree.ElementTree as Et\nimport os\nimport argparse\n\n\ndef extract_filename(e):\n filename = e.find('file').text\n modular_index = filename.find('modular')\n if modular_index > -1:\n filename = filename[modular_index:]\n return filename\n\n\ndef process_entry(e, result):\n filename = extract_filename(e)\n if filename in result:\n result[filename] += 1\n else:\n result[filename] = 1\n\n\ndef process_entries(items, result):\n for e in items:\n process_entry(e, result)\n\n\ndef process_file(file, errors):\n tree 
= Et.parse(file)\n items = tree.findall('.//problem')\n process_entries(items, errors)\n\n\ndef process_files(path):\n errors = dict()\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith('.xml'):\n process_file(os.path.join(root, file), errors)\n return errors\n\n\ndef dump_result(errors):\n sorted_files = sorted(errors.keys())\n for file in sorted_files:\n print(file, errors[file])\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('dir', default='.', help='Directory to scan')\nargs = parser.parse_args()\n\nfiles_with_errors = process_files(args.dir)\ndump_result(files_with_errors)\n\nprint(\"Files count: \", len(files_with_errors), \" Errors count: \", sum(files_with_errors.values()))\n", "sub_path": "python/svl/scripts/ScanIntellijInspections.py", "file_name": "ScanIntellijInspections.py", "file_ext": "py", "file_size_in_byte": 1323, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 28, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "542703564", "text": "from repeats import TE, chrom, gene, repeatCal\nfrom optparse import OptionParser\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport math\nimport gffOp\n\n# Explore relationship between TE density around a gene and its expression level\nparser = OptionParser()\nparser.add_option(\"--gff\",action=\"store\",type=\"string\",dest=\"gff\",\n help=\"path to the GFF file\")\nparser.add_option(\"-r\",\"--repeats\",action=\"store\",type=\"string\",dest=\"repeats\",\n help=\"Path to a repeat bedgraph file\")\nparser.add_option('-w', action=\"store\",type=\"int\",dest=\"winSize\", default=5000,\n help=\"Window size. 
Default 5kb\")\nparser.add_option('-s','--size',action='store',type='string',dest='chromSize',\n help='Path to the chromosome size file')\nparser.add_option(\"-i\",\"--ignore\",action=\"store\",type=\"string\",dest=\"garbage\",\n help=\"A list of scaffolds that you'd like to ignore in downstream analysis\")\nparser.add_option('--htseq',action=\"store\",type=\"string\",dest=\"htseq_dir\",\n help=\"Path to a directory that contains htseq count output for all biological replicates of one experimental condition\")\nparser.add_option('-b','--bin',action=\"store\",type=\"float\",dest=\"binSize\",default=0.1,\n help=\"bin sizes for grouping genes with similar TE density\")\n\n(options,args) = parser.parse_args()\n\n# We can query the TE density from this object with gene ID\n# repeatCal alreadys ignores genes from S,T,U\ngDen = repeatCal(options.winSize, options.chromSize, options.repeats, options.gff).geneDict\nexon_dict = gffOp.calcExonLen(options.gff, options.garbage.split(','))\ngList = [g for g,d in gDen.items()]\ngList.sort(key=lambda g:gDen[g])\nexonList = [exon_dict[g] for g in gList]\n\nbins = []\nr = 0\nindex = 0\nfor g in gList:\n if gDen[g] > r:\n bins.append(index)\n r += options.binSize\n index += 1\nbins.append(index+1)\n\n\ndef TPM_mean(gList, htseq_dir, exonList, bins):\n gc_dict = gffOp.geneCount(htseq_dir)\n # sort gene ID by increasing TE density\n gcList = [gc_dict[g] for g in gList] # now every thing is in the same order, we can do vector calc below\n # transpose gcList so that, each row is the count for every gene in that biological replicate\n gcList = np.array(gcList).T\n T = np.sum(gcList/exonList, axis=1)[:,np.newaxis] # sum by row, that gives the scaling T for each replicate\n #print(T)\n # Now calculate TPM for each gene in each biological replicate\n # +1 as pseudocounts to avoid calculatioin as log 0\n # use log and exp operation to avoid overflow\n # formula taken from Wagner et al.\n #print((gcList+1)[1:100])\n #print(exonList[1:100])\n TPM = np.exp(np.log(gcList+1)+np.log(1e6)-np.log(exonList)-np.log(T))\n #print(np.mean(TPM,axis=1))\n TPM = np.mean(TPM, axis=0) # take the mean column wise\n \n TPM_mean = []\n for i in range(len(bins)-1):\n TPM_mean.append(np.mean(TPM[bins[i]:bins[i+1]]))\n return TPM_mean\n#print(TPM_mean)\n\ndirs = options.htseq_dir.split(',')\nTPM_mean_Root = TPM_mean(gList,dirs[0],exonList,bins)\nTPM_mean_Shoot = TPM_mean(gList,dirs[1],exonList,bins)\nTPM_mean_ShootApex = TPM_mean(gList,dirs[2],exonList,bins)\n\nfig = plt.figure()\nplt.plot(np.arange(0,1,options.binSize), np.log(TPM_mean_Root),'b', label='root')\nplt.plot(np.arange(0,1,options.binSize), np.log(TPM_mean_Shoot),'r', label='shoot')\nplt.plot(np.arange(0,1,options.binSize), np.log(TPM_mean_ShootApex),'m', label='shoot-apex')\nplt.xlabel(\"TE Density\")\nplt.ylabel(\"mean TPM($\\log_2$)\")\nplt.title(\"TE Density and Gene Expression Level\")\nplt.legend(loc='lower left', fontsize='large')\nplt.savefig('TE.Gene.Expr.jpg', dpi=300, format='jpg', quality=95)\n\n\n\n\n\n", "sub_path": "subGenomeAssignment/TE/TEvsExpr.py", "file_name": "TEvsExpr.py", "file_ext": "py", "file_size_in_byte": 3730, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "optparse.OptionParser", "line_number": 10, "usage_type": "call"}, {"api_name": "repeats.repeatCal", "line_number": 30, "usage_type": "call"}, {"api_name": "gffOp.calcExonLen", "line_number": 31, "usage_type": "call"}, {"api_name": "gffOp.geneCount", "line_number": 48, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "267364443", "text": "from os import mkdir\nfrom os.path import exists\nimport os\nfrom ete3 import Tree\n\nfrom sklearn.externals.joblib import Parallel, delayed\n\npath_to_trees = './data/trees_mc5/'\npath_to_pheno = './data/pheno_mc5/'\n\npath_to_filtered_trees = './data/trees_mc5_Farhat/'\npath_to_filtered_pheno = './data/pheno_mc5_Farhat/'\n\npath_to_subset = './data/subsets/Farhat_subset_filtered2.txt'\n\n\ndef filter_pheno(sample_ids, filename):\n with open(path_to_pheno + filename, 'r') as fin:\n with open(path_to_filtered_pheno + filename, 'w')as fout:\n for line in fin.readlines():\n if line.split('\\t')[0] in sample_ids:\n fout.write(line)\n return 0\n\n\ndef filter_tree(sample_ids, filename):\n t = Tree(path_to_trees + filename)\n l = []\n for s in t.get_leaves():\n if s.name in sample_ids:\n l.append(s.name)\n if len(l) > 0:\n t.prune(l)\n t.write(format=1, outfile=path_to_filtered_trees + filename)\n else:\n print(filename)\n return 0\n\n\ndef main():\n if not exists(path_to_filtered_pheno):\n mkdir(path_to_filtered_pheno)\n if not exists(path_to_filtered_trees):\n mkdir(path_to_filtered_trees)\n sample_ids = set()\n with open(path_to_subset, 'r') as f:\n 
for line in f.readlines():\n sample_ids.add(line.strip())\n print(str(len(sample_ids)) + ' samples in subset')\n for (dirpath, dirnames, filenames) in os.walk(path_to_pheno):\n tasks = Parallel(n_jobs=-1)(delayed(filter_pheno)(sample_ids, filename) for filename in filenames)\n c = 0\n for task in tasks:\n c += task\n for (dirpath, dirnames, filenames) in os.walk(path_to_trees):\n tasks = Parallel(n_jobs=-1)(delayed(filter_tree)(sample_ids, filename) for filename in filenames)\n c = 0\n for task in tasks:\n c += task\n\n\nif __name__ == '__main__':\n main()", "sub_path": "phylo_methods/filter_trees.py", "file_name": "filter_trees.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ete3.Tree", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.Parallel", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.delayed", "line_number": 51, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.Parallel", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.delayed", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "233259398", "text": "import pyro\nimport torch.nn as pt_nn\n# import inspect\n\n\nclass Object(object):\n pass\n\n\nnn = Object()\n\ncur_stack = None\n\n# look for all of the objects inside of the nn module\nfor fct_name in dir(pt_nn):\n\n nn_mod = getattr(pt_nn, fct_name)\n\n if not isinstance(nn_mod, dict):\n setattr(nn, fct_name, nn_mod)\n\n if not issubclass(nn_mod, pt_nn.Module):\n setattr(nn, fct_name, nn_mod)\n continue\n\n # wrap these objects\n orig_new = getattr(nn_mod, \"__new__\")\n\n # capture all new calls, and add to stack when appropriate\n def _new_replace(cls, pyro_name=None, *args, **kwargs):\n\n # you got initialized, and we sent in a pyro_name object\n # build a current stack\n if pyro_name is not None:\n global cur_stack\n prev_stack = cur_stack\n cur_stack = []\n # pyro_name is the unique id\n setattr(nn, pyro_name, 0)\n\n # temporarily overload\n # time to do shady shit\n PT_Paramter = pt_nn.Parameter\n\n # aha! 
time to do some shady shit\n def pt_param_call(*args, **kwargs):\n uid = getattr(nn, pyro_name)\n unique_param_name = \"_\".join(cur_stack) + str(uid)\n setattr(nn, pyro_name, uid + 1)\n rv = PT_Paramter(*args, **kwargs)\n return pyro.param(unique_param_name, rv, group=pyro_name)\n\n # overwrite current with overloaded fct\n setattr(pt_nn, \"Parameter\", pt_param_call)\n\n # don't bother, nothing is happening\n if cur_stack is None:\n return orig_new(*args, **kwargs)\n\n # add to stack\n cur_stack.append(cls.__name__)\n\n # if any calls to pt param are made, they'll be caught in above fct\n rv = orig_new(cls, *args, **kwargs)\n\n # no more of this new fct\n cur_stack.pop()\n\n if pyro_name is not None:\n cur_stack = prev_stack\n # set back to original\n setattr(pt_nn, \"Parameter\", PT_Paramter)\n\n return rv\n\n # def fct_replace(pyro_name=None, *args, **kwargs):\n# if pyro_name != None:\n# # we need to do something\n# # here we scope/wrap all param calls with named pyro.param\n# # calls\n\n# # all done setting\n# lm = module_class_constructor(*args, **kwargs)\n\n# # clear state of whatever we did above\n# if pyro_name != None:\n# # clear\n\n# return lm\n\n # setattr(nn, )\n\n# module_class_constructor = getattr(torch.nn, \"Module\")\n\n# # going to replace this module construction object\n# # e.g. fct_name = Linear or fct_name = LSTM\n# def fct_replace(pyro_name=None, *args, **kwargs):\n# if pyro_name != None:\n# # we need to do something\n# # here we scope/wrap all param calls with named pyro.param\n# # calls\n\n# # all done setting\n# lm = module_class_constructor(*args, **kwargs)\n\n# # clear state of whatever we did above\n# if pyro_name != None:\n# # clear\n\n# return lm\n\n\n# setattr(torch.nn, \"Module\")\n\n\n# # look for all of the objects inside of the nn module\n# for fct_name in dir(nn):\n\n# # if we're not a subclass of nn.Module, ignore this object\n# # old: if not inspect.isclass(getattr(nn, fct_name)):\n# if not issubclass(getattr(nn, fct_name), nn.Module):\n# continue\n\n# class_constructor = getattr(nn, fct_name)\n\n\n# #\n# setattr(nn, fct_name, fct_replace)\n\n\n# # later on\n# import pyro.nn as nn\n\n\n# nn.Linear(pyro_name=\"dope\")\n", "sub_path": "pyro/nn/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn", "line_number": 15, "usage_type": "argument"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "argument"}, {"api_name": "torch.nn.Module", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "pyro.param", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "argument"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "argument"}]} +{"seq_id": "455292683", "text": "import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.graph_objs as go\r\nfrom plotly.offline import init_notebook_mode, iplot, plot\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nobbey = pd.read_csv('http://think.cs.vt.edu/corgis/csv/cars/cars.csv')\r\n\r\nobbey.rename({'City mpg': 'Miles-per-gallon in city','Highway mpg': 'Miles-per-gallon in 
highway','Make': 'Manufacturer'},axis='columns', inplace=True)\r\n\r\nobbey.drop(['Engine Type', 'Height', 'Length','Number of Forward Gears', 'Torque','Transmission','Hybrid','Width','Model Year', 'Classification'], axis=1)\r\n\r\n\r\n################################################################################\r\n\r\napp = dash.Dash(__name__)\r\n\r\napp.title = 'Semester Python Project'\r\n\r\nserver = app.server\r\n\r\napp.layout = html.Div(children=[\r\n html.H1(\r\n children='Car fuel usage statistics',\r\n ),\r\n dcc.Markdown('''\r\nThe ***Boxplot*** compares **miles-per-gallon** consumption in city and highway between various different car manufacturers.\\n\r\nThe ***Histogram*** illustrates amount of **horsepower** depending on **driveline**.\\n\r\nThe ***Pie Chart*** shows most common **types of fuel** used.\\n\r\nCoded and deployed by *Filip A.* His profile can be found [here](https://instagram.com/flipoboi).\r\n '''),\r\n html.Div(id='graph_container', children=[\r\n dcc.Dropdown(\r\n id='graph_types',\r\n options=[\r\n {'label': 'Cars by Fuel Type', 'value': 'cars_by_fuel_type'},\r\n {'label': 'Miles per gallon in city and highway', 'value': 'miles_per_gallon_city_highway'},\r\n {'label': 'Horsepower by driveline', 'value': 'horsepower_by_driveline'},\r\n\r\n ],\r\n value='horsepower_by_driveline'\r\n ),\r\n html.Div(id='radio_container', children=[\r\n dcc.RadioItems(\r\n options=[\r\n {'label': 'Miles per gallon in highway', 'value': 'miles_per_gallon_highway'},\r\n {'label': 'Miles per gallon in city', 'value': 'miles_per_gallon_city'},\r\n ],\r\n value='miles_per_gallon_city',\r\n id='radio_input'\r\n )\r\n ])\r\n ]),\r\n dcc.Graph(id='my_graph',\r\n ),\r\n])\r\n\r\n@app.callback(Output('radio_container', 'style'), [Input('graph_types', 'value')])\r\ndef toggle_container(toggle_value):\r\n if toggle_value == 'miles_per_gallon_city_highway':\r\n return {'display': 'block'}\r\n else:\r\n return {'display': 'none'}\r\n\r\n@app.callback(\r\n Output(component_id='my_graph', component_property='figure'),\r\n [Input('graph_types', 'value'),\r\n Input('radio_input', 'value')]\r\n)\r\n\r\ndef update_output_div(graph_type, box_type):\r\n if graph_type == 'cars_by_fuel_type':\r\n values = []\r\n for ftype in obbey[\"Fuel Type\"].unique():\r\n count = obbey[(obbey[\"Fuel Type\"] == ftype)][\"Fuel Type\"].count()\r\n values.append(count)\r\n fig = {\r\n \"data\": [\r\n {\r\n \"values\": values,\r\n \"labels\": obbey[\"Fuel Type\"].unique(),\r\n \"hoverinfo\": \"label+percent\",\r\n \"type\": \"pie\"\r\n }],\r\n \"layout\": {\r\n \"title\":\"Cars by Fuel Type\",\r\n }\r\n}\r\n\r\n elif graph_type == 'miles_per_gallon_city_highway':\r\n if box_type == 'miles_per_gallon_highway':\r\n data = []\r\n for mtype in obbey.Manufacturer.unique():\r\n trace = go.Box(\r\n x = obbey[(obbey.Manufacturer == mtype)].Manufacturer,\r\n y = obbey[(obbey.Manufacturer == mtype)][\"Miles-per-gallon in highway\"],\r\n name = mtype,\r\n )\r\n data.append(trace)\r\n\r\n layout = go.Layout(\r\n title = 'Miles per gallon in Highway',\r\n showlegend = True,\r\n yaxis=dict(\r\n title=\"Miles per gallon\"),\r\n xaxis=dict(\r\n title=\"\"),\r\n)\r\n\r\n fig = dict(data=data, layout=layout)\r\n else:\r\n data = []\r\n for mtype in obbey.Manufacturer.unique():\r\n trace = go.Box(\r\n x = obbey[(obbey.Manufacturer == mtype)].Manufacturer,\r\n y = obbey[(obbey.Manufacturer == mtype)][\"Miles-per-gallon in city\"],\r\n name = mtype,\r\n )\r\n data.append(trace)\r\n\r\n layout = go.Layout(\r\n title = 'Miles per gallon 
in City',\r\n showlegend = True,\r\n yaxis=dict(\r\n title=\"Miles per gallon\"),\r\n xaxis=dict(\r\n title=\"\"),\r\n)\r\n\r\n fig = dict(data=data, layout=layout)\r\n else:\r\n trace1 = go.Histogram(\r\n x=obbey[(obbey.Driveline == \"All-wheel drive\")].Horsepower,\r\n name = 'All-wheel drive'\r\n)\r\n trace2 = go.Histogram(\r\n x=obbey[(obbey.Driveline == \"Front-wheel drive\")].Horsepower,\r\n name = 'Front-wheel drive'\r\n)\r\n trace3 = go.Histogram(\r\n x=obbey[(obbey.Driveline == \"Rear-wheel drive\")].Horsepower,\r\n name = 'Rear-wheel drive'\r\n)\r\n trace4 = go.Histogram(\r\n x=obbey[(obbey.Driveline == \"Four-wheel drive\")].Horsepower,\r\n name = 'Four-wheel drive'\r\n)\r\n\r\n data = [trace1, trace2, trace3, trace4]\r\n layout = go.Layout(\r\n title = 'Horsepower by driveline'\r\n)\r\n fig = go.Figure(data=data, layout=layout)\r\n\r\n return fig\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 19, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 25, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 26, "usage_type": "call"}, {"api_name": "dash_core_components.Markdown", "line_number": 29, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 35, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 36, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 46, "usage_type": "call"}, {"api_name": "dash_core_components.RadioItems", "line_number": 47, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 57, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 61, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 61, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Box", "line_number": 97, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 97, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 104, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Box", "line_number": 117, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 117, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 124, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 124, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Histogram", "line_number": 135, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 135, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Histogram", "line_number": 139, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 139, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Histogram", "line_number": 143, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 143, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Histogram", "line_number": 147, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 147, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 153, "usage_type": "call"}, {"api_name": "plotly.graph_objs", 
"line_number": 153, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 156, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 156, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 69, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 70, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "639333428", "text": "# 1. Fields in a tuple related to dates and times should always have values.\n# 2. All fields in a tuple relating to details about a name (eg: Menu Item Name, First Name, etc)\n# should always have a value.\n# 3. The total charge of an order, the quantity and charge for an order item, and the price for a\n# menu item should always have values.\n# 4. Customers must have a specified mobile number.\n# 5. The TimeDelivered time/date should always be after TimeReady.\n\n\n# please use the same names as in the ER diagram for naming tables and attributes)\nimport json\nimport re\nfrom psycopg2 import connect\nfrom psycopg2.errors import UniqueViolation, NotNullViolation\nfrom typing import List, Sequence\nimport pytest\n\nValue = str\n\n\nclass Test_db_constraints:\n @staticmethod\n def create_insert_statement(table, columns, values):\n insert_smt = '''INSERT INTO {table}{columns} VALUES {values}'''\n rv = insert_smt.format(\n table=table,\n columns=f'({\",\".join(columns)})' if columns is not None else '',\n values='(' + ','.join(['%s'] * len(values)) + ')'\n )\n return rv\n\n @staticmethod\n def create_drop_statement(table):\n return f'drop table if exists {table} cascade;'\n\n def setup_method(self):\n # (primitively) parse tables from source\n tables: List[str] = []\n with open('ddl.sql', 'r') as f:\n for line in f.readlines():\n if 'CREATE TABLE' in line.upper():\n match = re.search(r'.*?create table ([\\w\"]+)', line, re.IGNORECASE)\n if match:\n tables.append(match.group(1))\n else:\n raise ValueError(f'Could not parse line in ddl.sql: {line}')\n self.tables = tables\n\n # connect to database\n with open(f'SQL_CREDENTIALS.json') as creds_file:\n sql_creds = json.load(creds_file)\n self.connection = connect(database='comp9120_a2', **sql_creds)\n\n if not self.connection:\n raise RuntimeError(f'could not connect to db with creds {sql_creds}')\n\n # clear and recreate database from DDL file\n with open('ddl.sql', 'r') as f:\n ddl = f.read()\n\n for table in self.tables:\n self.dbexec(self.create_drop_statement(table), msg=f'drop table {table}')\n self.dbexec(ddl, msg='create all tables from ddl')\n\n def teardown_method(self):\n self.connection.commit()\n for table in self.tables:\n rv = self.dbquery(f'select * from {table}',\n msg=f'select * from {table}:')\n for row in rv:\n print(row)\n self.dbexec(self.create_drop_statement(table))\n self.connection.close()\n\n def dbquery(self,\n sql: str,\n args: Sequence[str] = None,\n msg: str = ''):\n print(msg)\n rv = []\n with self.connection:\n with self.connection.cursor() as cur:\n cur.execute(sql, args)\n for record in cur:\n rv.append(record)\n return rv\n\n def dbexec(self, sql: str, args: Sequence[str] = None, msg: str = ''):\n print(msg)\n with self.connection:\n with self.connection.cursor() as cur:\n cur.execute(sql, args)\n\n def dbinsert(self, table, columns, values, msg=None):\n if msg is None:\n msg = 'insert row'\n self.dbexec(self.create_insert_statement(table, columns, values),\n values,\n '(' + ','.join(map(repr, values)) + ') ' + msg)\n\n def 
dbget_table(self, table):\n        return self.dbquery(f'select * from {table}', msg=f'get table {table}')\n\n    def run_multiple_inserts(self, table, columns, value_error_pairs):\n        for vals, err in value_error_pairs:\n            if err is None:\n                self.dbinsert(table, columns, vals)\n            else:\n                with pytest.raises(err):\n                    self.dbinsert(table, columns, vals, msg='insert row, should fail')\n\n    def TODO_menu_insert(self):\n        # TODO this test needs to be worked out. It is not really acceptable right now.\n        # must not allow a menu to be inserted before any menu items that are contained in that menu are inserted. (may change later)\n        with pytest.raises(Exception):\n            self.dbinsert('menu', ('MenuId', 'Description'), (0, 'desc'))\n\n        # must have at least one menuitem first\n        menucolumns = 'MenuItemId', 'Name', 'Price', 'Description', 'IsA'\n        values = [\n            ((0, 'dish', 31.89, None, 'Main'), None),\n            ((1, 'dish', 37.21, 'description', 'Side'), None),\n            ((2, 'dish', 37.21, 'description', 'Dessert'), None),\n        ]\n        self.run_multiple_inserts('MenuItem', menucolumns, values)\n\n        # function so we can test that it works at least one way around\n        def insert_menus():\n            columns = 'MenuId', 'Description'\n            value_error_pairs = [\n                ((0, 'this is a short description'), None),\n                ((0, 'this is a short description'), Exception),\n                ((1, 'this is a longer description, it spans approx 100 characters. bla bla bla bla bla la bla bla bla bla'), None),\n                ((1, 'desc'), UniqueViolation),\n                ((2, None), None), # should allow null descriptions\n            ]\n            self.run_multiple_inserts('menu', columns, value_error_pairs)\n\n        def insert_contains():\n            columns = 'MenuId', 'MenuItemId'\n            value_error_pairs = [\n                ((0, 0), None),\n                ((0, 0), Exception),\n                ((0, 5), Exception),\n                ((2, 0), None),\n                ((1, 0), None),\n            ]\n            self.run_multiple_inserts('Contains', columns, value_error_pairs)\n\n        try:\n            insert_menus()\n            insert_contains()\n        except Exception:\n            insert_contains()\n            insert_menus()\n\n    def test_menuitem_insert(self):\n        menucolumns = 'MenuItemId', 'Name', 'Price', 'Description', 'IsA'\n        values = [\n            # test name not null\n            ((0, None, 10, 'desc', 'Main'), NotNullViolation),\n            # test name and price not unique\n            ((0, 'name', 10, 'desc', 'Main'), None),\n            ((1, 'name', 10, 'desc', 'Main'), None),\n            # test name is string\n            ((2, False, 10, 'desc', 'Main'), None),\n            # test name length\n            ((3, 'a' * 30, 10, 'desc', 'Main'), None),\n            ((4, 'a' * 400, 10, 'desc', 'Main'), Exception),\n\n            # test id not null\n            ((None, 'name', 10, 'desc', 'Main'), NotNullViolation),\n            # test id is unique\n            ((3, 'name', 10, 'desc', 'Main'), UniqueViolation),\n\n            # test price not null\n            ((5, 'name', None, 'desc', 'Main'), NotNullViolation),\n            # test price is number\n            ((5, 'name', 'abc', 'desc', 'Main'), Exception),\n            # test price cant have .001 cents\n            ((5, 'name', 10.001, 'desc', 'Main'), Exception),\n            # test price can have cents\n            # ((5, 'name', 10.30, 'desc', 'Main'), Exception),\n\n            # test description can be null\n            ((5, 'name', 10, 'desc', 'Main'), None),\n            # test description is string\n            ((6, 'name', 10, False, 'Main'), None),\n\n            # test isa is one of ('Main', 'Side', 'Dessert')\n            ((7, 'name', 10, 'desc', 'Main'), None),\n            ((8, 'name', 10, 'desc', 'Side'), None),\n            ((9, 'name', 10, 'desc', 'Dessert'), None),\n            ((10, 'name', 10, 'desc', 'main'), Exception),\n            ((10, 'name', 10, 'desc', 'side'), Exception),\n            ((10, 'name', 10, 'desc', 'dessert'), Exception),\n            ((10, 'name', 10, 'desc', 'otheritem'), Exception),\n        ]\n        self.run_multiple_inserts('MenuItem', menucolumns, values)\n\n    def test_customer(self):\n        columns 
= 'CustomerId', 'MobileNo', 'FirstName', 'LastName', 'Address'\n values = [\n ((0, '0488888888', 'john', 'smith', '3 street street'), None),\n\n # test customerid unique\n ((0, '0488888888', 'john', 'smith', '3 street street'), Exception),\n # test customerid not null\n ((None, '0488888888', 'john', 'smith', '3 street street'), Exception),\n\n # test mobileno not unique\n ((1, '0488888888', 'john', 'smith', '3 street street'), None),\n # test mobileno not null\n ((2, None, 'john', 'smith', '3 street street'), Exception),\n # test mobileno is the right length\n ((2, '4' * 11, 'john', 'smith', '3 street street'), Exception),\n ((2, '4' * 9, 'john', 'smith', '3 street street'), Exception),\n ((2, '4' * 10, 'john', 'smith', '3 street street'), None),\n # test mobileno is a number\n ((3, False, 'john', 'smith', '3 street street'), Exception),\n ((3, 'a' * 10, 'john', 'smith', '3 street street'), Exception),\n\n # test firstname and lastname not null\n ((3, '0488888888', None, 'smith', '3 street street'), Exception),\n ((3, '0488888888', 'john', None, '3 street street'), Exception),\n # test firstname and lastname are strings\n ((3, '0488888888', False, 'smith', '3 street street'), Exception),\n ((3, '0488888888', 'john', False, '3 street street'), Exception),\n\n # test address can be null\n ((3, '0488888888', 'john', 'smith', None), None),\n # test address is string\n ((4, '0488888888', 'john', 'smith', False), None),\n ]\n self.run_multiple_inserts('Customer', columns, values)\n\n# Not sure why this works here and not in a test.\n# tdb = Test_db_constraints()\n# tdb.setup_method()\n# tdb.dbinsert('courier', None, ('53', 'name', 'address', '048756654'))\n# tdb.teardown_method()\n", "sub_path": "test_database.py", "file_name": "test_database.py", "file_ext": "py", "file_size_in_byte": 9999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "re.search", "line_number": 42, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 51, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Sequence", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 88, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 108, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 114, "usage_type": "call"}, {"api_name": "psycopg2.errors.UniqueViolation", "line_number": 133, "usage_type": "name"}, {"api_name": "psycopg2.errors.NotNullViolation", "line_number": 160, "usage_type": "name"}, {"api_name": "psycopg2.errors.NotNullViolation", "line_number": 171, "usage_type": "name"}, {"api_name": "psycopg2.errors.UniqueViolation", "line_number": 173, "usage_type": "name"}, {"api_name": "psycopg2.errors.NotNullViolation", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "227181484", "text": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\n\nclass CountermeasureTaskListCtrl(wx.ListCtrl):\n  def __init__(self,parent,dp):\n    wx.ListCtrl.__init__(self,parent,armid.COUNTERMEASURE_LISTTASKS_ID,size=wx.DefaultSize,style=wx.LC_REPORT | wx.LC_SINGLE_SEL)\n    self.dbProxy = dp\n    self.theCurrentEnvironment = ''\n    self.InsertColumn(0,'Task')\n    self.SetColumnWidth(0,150)\n\n  def setEnvironment(self,environmentName):\n    self.theCurrentEnvironment = environmentName\n\n  def load(self,dims):\n    self.DeleteAllItems()\n    for idx,dim in enumerate(dims):\n      self.InsertStringItem(idx,str(dim))\n\n  def dimensions(self):\n    dimList = []\n    for x in range(self.GetItemCount()):\n      dimList.append(self.GetItemText(x))\n    return dimList\n", "sub_path": "cairis/cairis/CountermeasureTaskListCtrl.py", "file_name": "CountermeasureTaskListCtrl.py", "file_ext": "py", "file_size_in_byte": 1519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "wx.ListCtrl", "line_number": 22, "usage_type": "attribute"}, {"api_name": "wx.ListCtrl.__init__", "line_number": 24, "usage_type": "call"}, {"api_name": "wx.ListCtrl", "line_number": 24, "usage_type": "attribute"}, {"api_name": "armid.COUNTERMEASURE_LISTTASKS_ID", "line_number": 24, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 24, "usage_type": "attribute"}, {"api_name": "wx.LC_REPORT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "wx.LC_SINGLE_SEL", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "306143145", "text": "from base64 import b64encode, b64decode\nfrom io import BytesIO\nfrom PIL import Image\nfrom captcha.image import ImageCaptcha\n\n# Generate a CAPTCHA image\ncaptcha = ImageCaptcha(width=160, height=60,\n                       fonts=['/Library/Fonts/Georgia.ttf', '/Library/Fonts/Courier New.ttf'])\n\ncap = captcha.generate_image('JAVA')\n# or\ncap = captcha.create_captcha_image('JAVA', (0, 255, 255), (123, 123, 123))\ncaptcha.create_noise_curve(cap, (0, 127, 127))\ncaptcha.create_noise_dots(cap, (0, 255, 255), width=2, number=30)\n\ncap.save('captcha.JPEG')\ncap.show()\n\n# Generate base64 directly from the ImageCaptcha output\noutput_buffer = BytesIO()\ncap.save(output_buffer, format='JPEG')\nbyte_data = output_buffer.getvalue()\nb64_str = b64encode(byte_data)\n\n# Convert the base64 string back to an image\nimage_bytes = b64decode(b64_str)\nimage_data = BytesIO(image_bytes)\nimg = Image.open(image_data)\nimg.save('captcha2.JPEG')\nimg.show()\n", "sub_path": "py3Example/tt.py", "file_name": "tt.py", "file_ext": "py", "file_size_in_byte": 888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "captcha.image", "line_number": 7, "usage_type": "name"}, {"api_name": "captcha.image.ImageCaptcha", "line_number": 7, "usage_type": "call"}, {"api_name": "captcha.image.generate_image", "line_number": 10, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 10, "usage_type": "name"}, {"api_name": "captcha.image.create_captcha_image", "line_number": 12, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 12, "usage_type": "name"}, {"api_name": 
"captcha.image.create_noise_curve", "line_number": 13, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 13, "usage_type": "name"}, {"api_name": "captcha.image.create_noise_dots", "line_number": 14, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 14, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 20, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 23, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 26, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "536474161", "text": "# -*- coding: UTF-8 -*-\n\n# Copyright (c) 2006-2016 Matthew Zipay <mattz@ninthtest.net>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"Test cases and runner for the :mod:`aglyph.component` module.\"\"\"\n\n__author__ = \"Matthew Zipay <mattz@ninthtest.net>\"\n__version__ = \"2.1.0\"\n\nimport functools\nimport logging\nimport unittest\n\nfrom aglyph import AglyphError\nfrom aglyph.component import (\n Component,\n Evaluator,\n Reference,\n Strategy,\n Template,\n)\n\nfrom test import enable_debug_logging\nfrom test.dummy import Alpha\n\n__all__ = [\n \"ComponentTest\",\n \"EvaluatorTest\",\n \"ReferenceTest\",\n \"suite\"\n]\n\n# don't use __name__ here; can be run as \"__main__\"\n_logger = logging.getLogger(\"test.test_component\")\n\n# PYVER: unittest.TestCase.assertIsNone is missing in Jython 2.5.3\nif (not hasattr(unittest.TestCase, \"assertIsNone\")):\n def _assert_is_none(self, obj, msg=None):\n if (obj is not None):\n self.fail(msg if (msg is not None) else \"%r is not None\" % obj)\n unittest.TestCase.assertIsNone = _assert_is_none\n\n\nclass TemplateTest(unittest.TestCase):\n \"\"\"Test the :class:`aglyph.component.Template` class.\"\"\"\n\n def test_defaults(self):\n template = Template(\"test\")\n self.assertEqual(\"test\", template.unique_id)\n self.assertIsNone(template.parent_id)\n self.assertIsNone(template.after_inject)\n self.assertIsNone(template.before_clear)\n self.assertEqual([], template.args)\n self.assertEqual({}, template.keywords)\n self.assertEqual({}, template.attributes)\n\n def test_readonly_properties(self):\n template = Template(\"test\")\n self.assertRaises(AttributeError, setattr, template, \"unique_id\", \"x\")\n self.assertRaises(AttributeError, setattr, template, 
\"parent_id\", \"x\")\n self.assertRaises(AttributeError, setattr, template, \"after_inject\",\n \"after_inject\")\n self.assertRaises(AttributeError, setattr, template, \"before_clear\",\n \"before_clear\")\n self.assertRaises(AttributeError, setattr, template, \"args\", [])\n self.assertRaises(AttributeError, setattr, template, \"keywords\", {})\n self.assertRaises(AttributeError, setattr, template, \"attributes\", {})\n\n def test_parent_id(self):\n template = Template(\"test\", parent_id=\"parent\")\n self.assertEqual(\"parent\", template.parent_id)\n\n def test_after_inject(self):\n template = Template(\"test\", after_inject=\"template_after_inject\")\n self.assertEqual(\"template_after_inject\", template.after_inject)\n\n def test_before_clear(self):\n template = Template(\"test\", before_clear=\"template_before_clear\")\n self.assertEqual(\"template_before_clear\", template.before_clear)\n\n def test_attributes_are_ordered(self):\n template = Template(\"test\")\n template.attributes[\"set_value\"] = None\n template.attributes[\"prop\"] = None\n template.attributes[\"field\"] = None\n expected_keys = [\"set_value\", \"prop\", \"field\"]\n self.assertEqual(expected_keys, list(template.attributes.keys()))\n\n\nclass ComponentTest(unittest.TestCase):\n \"\"\"Test the :class:`aglyph.component.Component` class.\"\"\"\n\n def test_bad_strategy(self):\n self.assertRaises(ValueError, Component, \"test.dummy.Alpha\",\n strategy=\"spam\")\n\n def test_defaults(self):\n component = Component(\"test.dummy.Alpha\")\n self.assertEqual(\"test.dummy.Alpha\", component.unique_id)\n # Component.component_id will be removed in 3.0.0\n self.assertEqual(component.unique_id, component.component_id)\n self.assertEqual(\"test.dummy.Alpha\", component.dotted_name)\n self.assertIsNone(component.factory_name)\n self.assertIsNone(component.member_name)\n self.assertEqual(Strategy.PROTOTYPE, component.strategy)\n self.assertIsNone(component.parent_id)\n self.assertIsNone(component.after_inject)\n self.assertIsNone(component.before_clear)\n self.assertEqual([], component.args)\n # Component.init_args will be removed in 3.0.0\n self.assertEqual(component.args, component.init_args)\n self.assertEqual({}, component.keywords)\n # Component.init_keywords will be removed in 3.0.0\n self.assertEqual(component.keywords, component.init_keywords)\n self.assertEqual({}, component.attributes)\n\n def test_unique_id_and_dotted_name(self):\n comonent = Component(\"alpha\", \"test.dummy.Alpha\")\n self.assertEqual(\"alpha\", comonent.component_id)\n self.assertEqual(\"test.dummy.Alpha\", comonent.dotted_name)\n\n def test_readonly_properties(self):\n component = Component(\"alpha\", \"test.dummy.Alpha\")\n self.assertRaises(AttributeError, setattr, component, \"unique_id\", \"x\")\n # Component.component_id will be removed in 3.0.0\n self.assertRaises(AttributeError, setattr, component, \"component_id\",\n \"x\")\n self.assertRaises(AttributeError, setattr, component, \"dotted_name\",\n \"test.dummy.Beta\")\n self.assertRaises(AttributeError, setattr, component, \"factory_name\",\n \"factory\")\n self.assertRaises(AttributeError, setattr, component, \"member_name\",\n \"member\")\n self.assertRaises(AttributeError, setattr, component, \"strategy\",\n Strategy.SINGLETON)\n self.assertRaises(AttributeError, setattr, component, \"parent_id\",\n \"parent\")\n self.assertRaises(AttributeError, setattr, component, \"after_inject\",\n \"after_inject\")\n self.assertRaises(AttributeError, setattr, component, \"before_clear\",\n 
\"before_clear\")\n self.assertRaises(AttributeError, setattr, component, \"args\", [])\n # Component.init_args will be removed in 3.0.0\n self.assertRaises(AttributeError, setattr, component, \"init_args\", [])\n self.assertRaises(AttributeError, setattr, component, \"keywords\", {})\n # Component.init_keywords will be removed in 3.0.0\n self.assertRaises(AttributeError, setattr, component, \"init_keywords\", {})\n self.assertRaises(AttributeError, setattr, component, \"attributes\", {})\n\n def test_explicit_prototype_strategy(self):\n component = Component(\"test.dummy.Alpha\", strategy=Strategy.PROTOTYPE)\n self.assertEqual(Strategy.PROTOTYPE, component.strategy)\n\n def test_explicit_singleton_strategy(self):\n component = Component(\"test.dummy.Alpha\", strategy=Strategy.SINGLETON)\n self.assertEqual(Strategy.SINGLETON, component.strategy)\n\n def test_explicit_borg_strategy(self):\n component = Component(\"test.dummy.Alpha\", strategy=Strategy.BORG)\n self.assertEqual(Strategy.BORG, component.strategy)\n\n def test_explicit_weakref_strategy(self):\n component = Component(\"test.dummy.Alpha\", strategy=Strategy.WEAKREF)\n self.assertEqual(Strategy.WEAKREF, component.strategy)\n\n def test_attributes_are_ordered(self):\n component = Component(\"test.dummy.Alpha\")\n component.attributes[\"set_value\"] = None\n component.attributes[\"prop\"] = None\n component.attributes[\"field\"] = None\n expected_keys = [\"set_value\", \"prop\", \"field\"]\n self.assertEqual(expected_keys, list(component.attributes.keys()))\n\n def test_factory_name(self):\n component = Component(\"epsilon-factory\",\n dotted_name=\"test.dummy.Epsilon\",\n factory_name=\"class_factory\")\n self.assertEqual(\"epsilon-factory\", component.component_id)\n self.assertEqual(\"test.dummy.Epsilon\", component.dotted_name)\n self.assertEqual(\"class_factory\", component.factory_name)\n self.assertTrue(component.member_name is None)\n\n def test_member_name(self):\n component = Component(\"epsilon-class\", dotted_name=\"test.dummy\",\n member_name=\"Epsilon\")\n self.assertEqual(\"epsilon-class\", component.component_id)\n self.assertEqual(\"test.dummy\", component.dotted_name)\n self.assertEqual(\"Epsilon\", component.member_name)\n self.assertTrue(component.factory_name is None)\n\n def test_factory_and_member_names_mutually_exclusive(self):\n self.assertRaises(AglyphError, Component, \"epsilon-fail\",\n dotted_name=\"test.dummy.Epsilon\",\n factory_name=\"class_factory\",\n member_name=\"ATTRIBUTE\")\n\n def test_parent_id(self):\n component = Component(\"test.dummy.Alpha\", parent_id=\"alpha-parent\")\n self.assertEqual(\"alpha-parent\", component.parent_id)\n\n def test_after_inject(self):\n component = Component(\"test.dummy.Alpha\",\n after_inject=\"component_after_inject\")\n self.assertEqual(\"component_after_inject\", component.after_inject)\n\n def test_prototype_ignores_before_clear(self):\n component = Component(\"test.dummy.Alpha\",\n before_clear=\"component_before_clear\")\n self.assertIsNone(component.before_clear)\n\n def test_component_singleton_before_clear(self):\n component = Component(\"test.dummy.Alpha\", strategy=\"singleton\",\n before_clear=\"component_before_clear\")\n self.assertEqual(\"component_before_clear\", component.before_clear)\n\n def test_component_borg_before_clear(self):\n component = Component(\"test.dummy.Alpha\", strategy=\"borg\",\n before_clear=\"component_before_clear\")\n self.assertEqual(\"component_before_clear\", component.before_clear)\n\n def 
test_component_weakref_before_clear(self):\n component = Component(\"test.dummy.Alpha\", strategy=\"weakref\",\n before_clear=\"component_before_clear\")\n self.assertEqual(\"component_before_clear\", component.before_clear)\n\n\nclass EvaluatorTest(unittest.TestCase):\n \"\"\"Test the :class:`aglyph.component.Evaluator` class.\"\"\"\n\n def test_func_not_callable(self):\n self.assertRaises(TypeError, Evaluator, None)\n\n def test_func_with_no_args_or_keywords(self):\n evaluator = Evaluator(int)\n self.assertTrue(evaluator.func is int)\n self.assertEqual((), evaluator.args)\n self.assertEqual({}, evaluator.keywords)\n self.assertEqual(0, evaluator(None))\n\n def test_func_with_only_args(self):\n evaluator = Evaluator(int, \"4f\", 16)\n self.assertTrue(evaluator.func is int)\n self.assertEqual((\"4f\", 16), evaluator.args)\n self.assertEqual({}, evaluator.keywords)\n self.assertEqual(79, evaluator(None))\n\n def test_func_with_only_keywords(self):\n def f(a=None, b=None):\n if (None not in [a, b]):\n return a * b\n evaluator = Evaluator(f, a=7, b=9)\n self.assertTrue(evaluator.func is f)\n self.assertEqual((), evaluator.args)\n self.assertEqual({'a': 7, 'b': 9}, evaluator.keywords)\n self.assertEqual(63, evaluator(None))\n\n def test_func_with_both_args_and_keywords(self):\n f = lambda x: int(x, 0)\n evaluator = Evaluator(max, \"0xa\", \"0xF\", key=f)\n self.assertTrue(evaluator.func is max)\n self.assertEqual((\"0xa\", \"0xF\"), evaluator.args)\n self.assertEqual({\"key\": f}, evaluator.keywords)\n # natural sort would identify \"0xa\" as the max\n self.assertEqual(\"0xF\", evaluator(None))\n\n def test_readonly_properties(self):\n evaluator = Evaluator(int, \"79\")\n self.assertRaises(AttributeError, setattr, evaluator, \"func\", min)\n self.assertRaises(AttributeError, setattr, evaluator, \"args\", (\"0xb\",\n \"0xE\"))\n self.assertRaises(AttributeError, setattr, evaluator, \"keywords\",\n {\"key\": lambda x: x.upper()})\n\n def test_resolve_reference(self):\n class MockAssembler(object):\n def assemble(self, component_id):\n return component_id.upper()\n evaluator = Evaluator(Alpha, Reference(\"test-ref\"))\n alpha = evaluator(MockAssembler())\n self.assertEqual(\"TEST-REF\", alpha.arg)\n\n def test_resolve_evaluator(self):\n evaluator = Evaluator(int, Evaluator(hex, 79), 0)\n self.assertEqual(79, evaluator(None))\n\n def test_resolve_partial(self):\n evaluator = Evaluator(int, functools.partial(hex, 79), 0)\n self.assertEqual(79, evaluator(None))\n\n def test_resolve_dict(self):\n evaluator = Evaluator(Alpha, {\"key\": \"value\"})\n alpha = evaluator(None)\n self.assertEqual({\"key\": \"value\"}, alpha.arg)\n\n def test_resolve_sequence(self):\n evaluator = Evaluator(Alpha, (2, 3, 5, 7))\n alpha = evaluator(None)\n self.assertEqual((2, 3, 5, 7), alpha.arg)\n\n def test_resolve_nested(self):\n class MockAssembler(object):\n def assemble(self, component_id):\n return component_id.upper()\n evaluator = Evaluator(Alpha, {\"key\": Reference(\"test-ref\")})\n alpha = evaluator(MockAssembler())\n self.assertEqual({\"key\": \"TEST-REF\"}, alpha.arg)\n\n\n# PYVER: unicode is undefined in Python >= 3.0 (str is a Unicode string in\n# Python >= 3.0)\ntry:\n _ustr = eval(\"unicode\")\nexcept NameError:\n _ustr = str\n\n# PYVER: u\"...\" syntax is illegal in Python >= 3.0, and b\"...\" syntax is\n# illegal in Python < 3.0\ntry:\n _dummy_dot_Alpha_ascii = eval(\"u'test.dummy.Alpha'.encode('ascii')\")\n _Delta_ustr = eval(\"u'\\u0394'\")\n _Delta_utf8 = eval(\"u'\\u0394'.encode('utf-8')\")\nexcept 
SyntaxError:\n _dummy_dot_Alpha_ascii = eval(\"b'test.dummy.Alpha'\")\n _Delta_ustr = eval(\"'\\u0394'\")\n _Delta_utf8 = eval(\"'\\u0394'.encode('utf-8')\")\n\n\nclass ReferenceTest(unittest.TestCase):\n \"\"\"Test the :class:`aglyph.component.Reference` class.\"\"\"\n\n def test_dotted_name_ascii(self):\n ref = Reference(\"test.dummy.Alpha\")\n self.assertTrue(isinstance(ref, _ustr))\n self.assertEqual(_dummy_dot_Alpha_ascii, ref.encode(\"ascii\"))\n\n def test_component_id_unicode_error(self):\n # Greek capital letter Delta (U+0394)\n ref = Reference(_Delta_ustr)\n self.assertTrue(isinstance(ref, _ustr))\n self.assertRaises(UnicodeEncodeError, ref.encode, \"ascii\")\n\n def test_component_id_unicode_encode(self):\n # Greek capital letter Delta (U+0394)\n ref = Reference(_Delta_ustr)\n self.assertTrue(isinstance(ref, _ustr))\n self.assertEqual(_Delta_utf8, ref.encode(\"utf-8\"))\n\n\ndef suite():\n \"\"\"Build the test suite for the :mod:`aglyph.component` module.\"\"\"\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TemplateTest))\n suite.addTest(unittest.makeSuite(ComponentTest))\n suite.addTest(unittest.makeSuite(EvaluatorTest))\n suite.addTest(unittest.makeSuite(ReferenceTest))\n _logger.debug(\"RETURN %r\", suite)\n return suite\n\n\nif (__name__ == \"__main__\"):\n enable_debug_logging(suite)\n unittest.TextTestRunner().run(suite())\n\n", "sub_path": "test/test_component.py", "file_name": "test_component.py", "file_ext": "py", "file_size_in_byte": 16169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 52, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 55, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 59, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 62, "usage_type": "attribute"}, {"api_name": "aglyph.component.Template", "line_number": 66, "usage_type": "call"}, {"api_name": "aglyph.component.Template", "line_number": 76, "usage_type": "call"}, {"api_name": "aglyph.component.Template", "line_number": 88, "usage_type": "call"}, {"api_name": "aglyph.component.Template", "line_number": 92, "usage_type": "call"}, {"api_name": "aglyph.component.Template", "line_number": 96, "usage_type": "call"}, {"api_name": "aglyph.component.Template", "line_number": 100, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 108, "usage_type": "attribute"}, {"api_name": "aglyph.component.Component", "line_number": 112, "usage_type": "argument"}, {"api_name": "aglyph.component.Component", "line_number": 116, "usage_type": "call"}, {"api_name": "aglyph.component.Strategy.PROTOTYPE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 123, "usage_type": "name"}, {"api_name": "aglyph.component.Component", "line_number": 136, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 141, "usage_type": "call"}, {"api_name": "aglyph.component.Strategy.SINGLETON", "line_number": 153, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 153, "usage_type": "name"}, {"api_name": "aglyph.component.Component", "line_number": 169, "usage_type": "call"}, {"api_name": "aglyph.component.Strategy.PROTOTYPE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 169, "usage_type": "name"}, {"api_name": 
"aglyph.component.Strategy.PROTOTYPE", "line_number": 170, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 170, "usage_type": "name"}, {"api_name": "aglyph.component.Component", "line_number": 173, "usage_type": "call"}, {"api_name": "aglyph.component.Strategy.SINGLETON", "line_number": 173, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 173, "usage_type": "name"}, {"api_name": "aglyph.component.Strategy.SINGLETON", "line_number": 174, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 174, "usage_type": "name"}, {"api_name": "aglyph.component.Component", "line_number": 177, "usage_type": "call"}, {"api_name": "aglyph.component.Strategy.BORG", "line_number": 177, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 177, "usage_type": "name"}, {"api_name": "aglyph.component.Strategy.BORG", "line_number": 178, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 178, "usage_type": "name"}, {"api_name": "aglyph.component.Component", "line_number": 181, "usage_type": "call"}, {"api_name": "aglyph.component.Strategy.WEAKREF", "line_number": 181, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 181, "usage_type": "name"}, {"api_name": "aglyph.component.Strategy.WEAKREF", "line_number": 182, "usage_type": "attribute"}, {"api_name": "aglyph.component.Strategy", "line_number": 182, "usage_type": "name"}, {"api_name": "aglyph.component.Component", "line_number": 185, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 193, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 202, "usage_type": "call"}, {"api_name": "aglyph.AglyphError", "line_number": 210, "usage_type": "argument"}, {"api_name": "aglyph.component.Component", "line_number": 210, "usage_type": "argument"}, {"api_name": "aglyph.component.Component", "line_number": 216, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 220, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 225, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 230, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 235, "usage_type": "call"}, {"api_name": "aglyph.component.Component", "line_number": 240, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 245, "usage_type": "attribute"}, {"api_name": "aglyph.component.Evaluator", "line_number": 249, "usage_type": "argument"}, {"api_name": "aglyph.component.Evaluator", "line_number": 252, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 259, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 269, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 277, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 285, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 296, "usage_type": "call"}, {"api_name": "test.dummy.Alpha", "line_number": 296, "usage_type": "argument"}, {"api_name": "aglyph.component.Reference", "line_number": 296, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 301, "usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 305, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 305, 
"usage_type": "call"}, {"api_name": "aglyph.component.Evaluator", "line_number": 309, "usage_type": "call"}, {"api_name": "test.dummy.Alpha", "line_number": 309, "usage_type": "argument"}, {"api_name": "aglyph.component.Evaluator", "line_number": 314, "usage_type": "call"}, {"api_name": "test.dummy.Alpha", "line_number": 314, "usage_type": "argument"}, {"api_name": "aglyph.component.Evaluator", "line_number": 322, "usage_type": "call"}, {"api_name": "test.dummy.Alpha", "line_number": 322, "usage_type": "argument"}, {"api_name": "aglyph.component.Reference", "line_number": 322, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 346, "usage_type": "attribute"}, {"api_name": "aglyph.component.Reference", "line_number": 350, "usage_type": "call"}, {"api_name": "aglyph.component.Reference", "line_number": 356, "usage_type": "call"}, {"api_name": "aglyph.component.Reference", "line_number": 362, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 369, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 370, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 371, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 372, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 373, "usage_type": "call"}, {"api_name": "test.enable_debug_logging", "line_number": 379, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 380, "usage_type": "call"}]} +{"seq_id": "333876290", "text": "from contextlib import suppress\nfrom functools import lru_cache\nfrom itertools import count\nfrom math import ceil\nfrom pathlib import Path\nfrom pprint import pprint\nfrom time import sleep\nfrom typing import Optional, Dict\n\nfrom lifxlan3 import TileChain, LifxLAN, Color, Colors, cycle, init_log, timer, Dir\nfrom lifxlan3.routines.tile.tile_utils import ColorMatrix, default_shape, tile_map, RC, default_color\n\n__author__ = 'acushner'\n\nlog = init_log(__name__)\n\n\n@lru_cache()\ndef get_tile_chain() -> Optional[TileChain]:\n with suppress(IndexError):\n lifx = LifxLAN()\n return lifx.tilechain_lights[0]\n\n\ndef _cm_test(c: Color) -> ColorMatrix:\n cm = ColorMatrix.from_shape(default_shape)\n cm[0, 0] = cm[0, 1] = cm[1, 0] = cm[1, 1] = c\n return cm\n\n\ndef id_tiles(*, rotate=False):\n \"\"\"set tiles to different colors in the corner to ID tile and help determine orientation\"\"\"\n tc = get_tile_chain()\n colors = 'MAGENTA', 'YELLOW', 'YALE_BLUE', 'GREEN', 'BROWN'\n for ti in tile_map.values():\n name = colors[ti.idx]\n print(ti.idx, name)\n cm = _cm_test(Colors[name])\n if rotate:\n cm = cm.rotate_from_origin(ti.origin)\n tc.set_tile_colors(ti.idx, cm.flattened)\n\n\n_color_replacements: Dict[str, Dict[Color, Color]] = dict(\n crono={Color(hue=54612, saturation=65535, brightness=65535, kelvin=3200): Colors.OFF},\n ff6={Color(hue=32767, saturation=65535, brightness=32896, kelvin=3200): Colors.OFF},\n ff4={Color(hue=32767, saturation=65535, brightness=65535, kelvin=3200): Colors.OFF},\n mario_kart={Color(hue=21845, saturation=48059, brightness=65535, kelvin=3200): Colors.OFF},\n lttp={Color(hue=32767, saturation=65535, brightness=16448, kelvin=3200): Colors.OFF,\n Color(hue=32767, saturation=65535, brightness=32896, kelvin=3200): Colors.OFF},\n maniac={Color(hue=32767, saturation=65535, brightness=32896, kelvin=3200): Colors.OFF})\n\n\ndef _get_color_replacements(filename):\n for k, v in _color_replacements.items():\n if k in filename:\n return v\n return {}\n\n\ndef 
animate(filename: str, *, center: bool = False, sleep_secs: float = .75, in_terminal=False, size=RC(16, 16),\n strip=True):\n \"\"\"split color matrix and change images every `sleep_secs` seconds\"\"\"\n cm = ColorMatrix.from_filename(filename)\n color_map = _get_color_replacements(filename)\n for i, cm in enumerate(cycle(cm.split())):\n log.info('.')\n c_offset = 0 if not center else max(0, ceil(cm.width / 2 - 8))\n cm.replace(color_map)\n set_cm(cm, offset=RC(0, c_offset), size=size, in_terminal=in_terminal, strip=strip)\n sleep(sleep_secs)\n\n\ndef translate(filename: str, *, sleep_secs: float = .5, in_terminal=False,\n size=RC(16, 16), split=True, dir: Dir = Dir.right, n_iterations: int = None):\n \"\"\"move right\"\"\"\n cm = ColorMatrix.from_filename(filename)\n color_map = _get_color_replacements(filename)\n if split:\n cm = cm.split()[0]\n\n mult = 1 if dir is Dir.right else -1\n\n def _gen_offset():\n its = count() if n_iterations is None else range(n_iterations)\n for _ in its:\n for _c_offset in range(cm.width - size.c):\n yield mult * (cm.width - _c_offset - 1)\n\n for c_offset in _gen_offset():\n cm.replace(color_map)\n cm.wrap = True\n set_cm(cm, offset=RC(0, c_offset), size=size, in_terminal=in_terminal)\n sleep(sleep_secs)\n\n\n@timer\ndef set_cm(cm: ColorMatrix, offset=RC(0, 0), size=RC(16, 16),\n *, in_terminal=False, with_mini=True, strip=True, verbose=True,\n duration_msec=0):\n if strip:\n cm = cm.strip()\n orig_cm = cm = cm.get_range(RC(0, 0) + offset, size + offset)\n if in_terminal:\n print(cm.color_str)\n if verbose:\n print(cm.describe)\n print(cm.resize().color_str)\n print(cm.resize((4, 4)).color_str)\n return\n\n cm.set_max_brightness_pct(60)\n tiles = cm.to_tiles()\n\n idx_colors_map = {}\n for t_idx, cm in tiles.items():\n t_info = tile_map[t_idx]\n cm.replace({default_color: Color(1, 1, 100, 9000)})\n idx_colors_map[t_info.idx] = cm.flattened\n\n if with_mini:\n ti = tile_map[RC(2, -1)]\n idx_colors_map[ti.idx] = orig_cm.resize((8, 8)).rotate_from_origin(ti.origin).flattened\n\n tc = get_tile_chain()\n tc.set_tilechain_colors(idx_colors_map, duration=duration_msec)\n\n\ndef _cmp_colors(idx_colors_map):\n from itertools import starmap\n tc = get_tile_chain()\n lights = {idx: list(starmap(Color, tc.get_tile_colors(idx).colors)) for idx in idx_colors_map}\n\n here_there = {k: set(idx_colors_map[k]) - set(lights[k])\n for k in idx_colors_map}\n there_here = {k: set(lights[k]) - set(idx_colors_map[k])\n for k in idx_colors_map}\n\n print('here - there')\n pprint(here_there)\n print()\n print('there - here')\n pprint(there_here)\n print()\n\n\ndef _init_images():\n p = Path(__file__).parent / 'imgs'\n return sorted(f.name for f in p.iterdir())\n\n\ntry:\n images = _init_images()\nexcept FileNotFoundError:\n images = None\n\n\ndef for_talk():\n return animate('./imgs/text.png', sleep_secs=.5, strip=False)\n return id_tiles(rotate=False)\n return animate('imgs/m_small.png', sleep_secs=.75)\n return animate('imgs/ff4_tellah.png', sleep_secs=.75)\n\n\ndef __main():\n return for_talk()\n # return id_tiles(rotate=False)\n # return animate('./imgs/m_small.png', sleep_secs=.75)\n return animate('imgs/ff4_tellah.png', sleep_secs=.75)\n return translate('imgs/ff4_tellah.png', split=False, dir=Dir.left, sleep_secs=.1, n_iterations=4)\n return animate('imgs/mm_walk.png', sleep_secs=4, in_terminal=False)\n return animate('imgs/maniac_bernard.png')\n\n\nif __name__ == '__main__':\n __main()\n", "sub_path": "lifxlan3/routines/tile/core.py", "file_name": "core.py", "file_ext": 
"py", "file_size_in_byte": 5849, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "lifxlan3.init_log", "line_number": 15, "usage_type": "call"}, {"api_name": "contextlib.suppress", "line_number": 20, "usage_type": "call"}, {"api_name": "lifxlan3.LifxLAN", "line_number": 21, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "lifxlan3.TileChain", "line_number": 19, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 25, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix.from_shape", "line_number": 26, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.default_shape", "line_number": 26, "usage_type": "argument"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix", "line_number": 26, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix", "line_number": 25, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.tile_map.values", "line_number": 35, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.tile_map", "line_number": 35, "usage_type": "name"}, {"api_name": "lifxlan3.Colors", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 44, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 45, "usage_type": "call"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 45, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 45, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 46, "usage_type": "call"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 46, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 46, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 47, "usage_type": "call"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 47, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 47, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 48, "usage_type": "call"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 48, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 48, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 49, "usage_type": "call"}, {"api_name": "lifxlan3.Color", "line_number": 50, "usage_type": "call"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 49, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 49, "usage_type": "name"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 50, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 50, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 51, "usage_type": "call"}, {"api_name": "lifxlan3.Colors.OFF", "line_number": 51, "usage_type": "attribute"}, {"api_name": "lifxlan3.Colors", "line_number": 51, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 61, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix.from_filename", "line_number": 64, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix", "line_number": 64, "usage_type": "name"}, {"api_name": "lifxlan3.cycle", "line_number": 66, "usage_type": "call"}, {"api_name": 
"math.ceil", "line_number": 68, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 70, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "lifxlan3.Dir", "line_number": 75, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 75, "usage_type": "call"}, {"api_name": "lifxlan3.Dir.right", "line_number": 75, "usage_type": "attribute"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix.from_filename", "line_number": 77, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix", "line_number": 77, "usage_type": "name"}, {"api_name": "lifxlan3.Dir.right", "line_number": 82, "usage_type": "attribute"}, {"api_name": "lifxlan3.Dir", "line_number": 82, "usage_type": "name"}, {"api_name": "itertools.count", "line_number": 85, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.ColorMatrix", "line_number": 98, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 98, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 103, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.tile_map", "line_number": 117, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.default_color", "line_number": 118, "usage_type": "name"}, {"api_name": "lifxlan3.Color", "line_number": 118, "usage_type": "call"}, {"api_name": "lifxlan3.routines.tile.tile_utils.tile_map", "line_number": 122, "usage_type": "name"}, {"api_name": "lifxlan3.routines.tile.tile_utils.RC", "line_number": 122, "usage_type": "call"}, {"api_name": "lifxlan3.timer", "line_number": 97, "usage_type": "name"}, {"api_name": "itertools.starmap", "line_number": 132, "usage_type": "call"}, {"api_name": "lifxlan3.Color", "line_number": 132, "usage_type": "argument"}, {"api_name": "pprint.pprint", "line_number": 140, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 143, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 148, "usage_type": "call"}, {"api_name": "lifxlan3.Dir.left", "line_number": 170, "usage_type": "attribute"}, {"api_name": "lifxlan3.Dir", "line_number": 170, "usage_type": "name"}]} +{"seq_id": "238858388", "text": "from utils.elastic_client import elastic_client, ElasticScroller\nfrom cachetools import LFUCache, cached\n\nmovie_data_cache = LFUCache(maxsize=1000)\n\n\ndef get_movies_count(dataset: str):\n return elastic_client.count('movies', {\n 'query': {\n 'term': {\n 'dataset': {\n 'value': dataset\n }\n }\n }\n })\n\n\ndef create_genre_vector(genres: list):\n vec = ['0'] * 20\n genre_index = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama',\n 'Fantasy', 'Film-Noir', 'Horror', 'IMAX', 'Mystery', 'Musical', '(no genres listed)', 'Romance',\n 'Sci-Fi', 'Thriller', 'War', 'Western']\n for genre in genres:\n vec[genre_index.index(genre)] = str(1)\n\n return vec\n\n\ndef create_rating_histogram(ratings: list):\n vec = ['0'] * 10\n rating_index = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]\n for rating in ratings:\n vec[rating_index.index(rating['rating'])] = str(rating['count'])\n\n return vec\n\n\n@cached(movie_data_cache)\ndef create_movie_vector(movie_id: str, dataset: str) -> str:\n movie_query = {\n 
\"query\": {\n \"bool\": {\n \"must\": [\n {\"term\": {\"dataset\": {\"value\": dataset}}},\n {\"term\": {\"movieId\": {\"value\": movie_id}}},\n ]\n }\n }\n }\n\n movie = elastic_client.search_one('movies', movie_query)['_source']\n\n genres = ' '.join(create_genre_vector(movie['genre']))\n rating_histogram = ' '.join(create_rating_histogram(movie['ratingHistogram']))\n\n return '{} {} {} {} {} {}'.format(\n movie['movieId'],\n movie['year'],\n genres,\n movie['averageRating'],\n movie['ratingCount'],\n rating_histogram)\n", "sub_path": "src/data/movie_data_preprocess.py", "file_name": "movie_data_preprocess.py", "file_ext": "py", "file_size_in_byte": 1834, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cachetools.LFUCache", "line_number": 4, "usage_type": "call"}, {"api_name": "utils.elastic_client.elastic_client.count", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.elastic_client.elastic_client", "line_number": 8, "usage_type": "name"}, {"api_name": "utils.elastic_client.elastic_client.search_one", "line_number": 52, "usage_type": "call"}, {"api_name": "utils.elastic_client.elastic_client", "line_number": 52, "usage_type": "name"}, {"api_name": "cachetools.cached", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "591450390", "text": "# get\r\nimport pandas as pd\r\nimport boto3\r\n\r\nbucket = \"abxgh-test\"\r\nfile_name = \"sample.csv\"\r\n\r\ns3 = boto3.client('s3') \r\n# 's3' is a key word. create connection to S3 using default config and all buckets within S3\r\n\r\nobj = s3.get_object(Bucket= bucket, Key= file_name) \r\n# get object and file (key) from bucket\r\n\r\ninitial_df = pd.read_csv(obj['Body']) # 'Body' is a key word\r\nprint(initial_df['이름'])\r\n\r\n\r\n\r\n\r\n# put\r\n# s3.put_object(Body=output, Bucket=bucket_name, Key=new_file)", "sub_path": "edu_aws/s3/s3_get.py", "file_name": "s3_get.py", "file_ext": "py", "file_size_in_byte": 482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "boto3.client", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "384477303", "text": "#here goes the Copyright (c)\n\n\"\"\"Read mpg file stats and consolidate data\"\"\"\n\n#Python stdlib imports\nimport re\nimport csv\nimport io\nimport argparse\nimport os\n\n#uses openpyxl for reading excel \nimport openpyxl\n\n\n#Constants\n__csv__player__columns = ['poste', 'nom', 'tit', 'entrees', 'buts', 'team']\n__csv__team__columns = ['sheet', 'name', 'short_name']\n_players_csv_file_ = \"pystatsmpg_players.csv\"\n_teams_csv_file_ = \"pystatsmpg_teams.csv\"\n\n\n#MPG constants\n_entered_string = \"<\"\n_injured_string = \"Bl.\"\n\n\n#Regex\n_team_regex = r\"-{8}[^0-9]*([0-9]*)[^A-Z]*([a-zA-Z]*).*\\n([^,]*)\"\n_day_regex = r\"J[0-9]{2}\"\n\n\n#internals\n_teams = []\n_players = []\n_current_team = \"\"\n_current_day = 1\n_days = []\n \ndef update(csv = None, players = None, teams = None):\n \"\"\"update stats\n\n provides either the csv file OR both players and teams csv\n\n Keyword arguments:\n csv -- csv export of the xlsx mpg stats file\n players -- csv dumps of the stats as formatted by the dump() \n teams -- csv dumps such as the one provided by dump()\n\n \"\"\"\n if csv is not None:\n _update_from_csv(csv)\n return\n _update_teams(teams)\n _update_players(players)\n\n\ndef init(csv):\n \"\"\"Init the stats with data provided as csv. 
The csv layout must\n    follow the layout of the xlsx file provided by mpg. This layout\n    is referred to as the mpg layout.\n\n    \"\"\"\n    _init()\n    update(csv)\n\n\ndef clear():\n    _init()\n    \n\ndef xlsx_to_csv(filename):\n    wb = openpyxl.load_workbook(filename, data_only=True)\n    sh = wb.get_active_sheet()\n    output = io.StringIO()\n    c = csv.writer(output, lineterminator=\"\\n\")\n    i = 1\n    for sh in wb.worksheets:\n        output.write(\"-------- \" + str(i) + \" - \" + sh.title + \"\\n\")\n        i = i + 1\n        for r in sh.rows:\n            c.writerow([cell.internal_value for cell in r])\n    return output.getvalue()\n\n\ndef update_xlsx(xlsx):\n    csv = xlsx_to_csv(xlsx)\n    _update_from_csv(csv)\n    \n\n#Models \nclass Team:\n    \"\"\"Team contains all team related properties \"\"\"\n\n    def __init__(self):\n        self.sheet = \"\"\n        self.name = \"\"\n        self.short_name = \"\"\n        self.days = []\n\n    \nclass Note:\n    \"\"\"Note contains all notation related properties\"\"\"\n    \n    def __init__(self):\n        self.note = None\n        self.goals_pos = None\n        self.goals_neg = None\n        self.entered = False\n        self.injured = False\n\n    def __eq__(self, other):\n        return self.__dict__ == other.__dict__\n\n\nclass Player:\n    \"\"\"Player contains all player related properties \"\"\"\n\n    def __init__(self):\n        self.poste = \"\"\n        self.nom = \"\"\n        self.tit = \"\"\n        self.entrees = \"\"\n        self.buts = \"\"\n        self.team = \"\"\n        self.note = []\n        self._updated = False\n\n\n    def __str__(self):\n        return str(self.__dict__)\n\n    \nclass DayHeader:\n    \"\"\"Day contains all day related properties \"\"\"\n\n    def __init__(self):\n        self.day = \"\"\n        self.with_goals = False\n    \n\n    \n#creators\ndef team(sheet, name, short_name):\n    \"Create a Team\"\n    t = Team()\n    t.sheet = sheet\n    t.name = name\n    t.short_name = short_name\n    return t\n\n\ndef note(\n    note = None,\n    goals_pos = None,\n    goals_neg = None,\n    entered = False,\n    injured = False):\n    \"Create a Note\"\n    n = Note()\n    n.note = note\n    n.goals_pos = goals_pos\n    n.goals_neg = goals_neg\n    n.entered = entered\n    n.injured = injured\n    return n\n\n\ndef player(\n    poste = \"\",\n    nom = \"\",\n    tit = \"\",\n    entrees = \"\",\n    buts = \"\",\n    team = \"\",\n    note = []):\n    p = Player()\n    p.poste = poste\n    p.nom = nom\n    p.tit = tit\n    p.entrees = entrees\n    p.buts = buts\n    p.team = team\n    p.note = note\n    return p\n\n\ndef dayheader(day = \"\", with_goals = False):\n    d = DayHeader()\n    d.day = day\n    d.with_goals = with_goals\n    return d\n\n\n#internals\ndef _update_players(players_csv):\n    lines = players_csv.split(\"\\n\")\n    _update_days_from_header_line(lines[0])\n    for line in lines[1:]:\n        if line is '':\n            continue\n        _update_player(line.split(','))\n\n\ndef _update_player(player_tokens):\n    \"update player from the line\"\n    player = {}\n    i = 0\n    for prop in __csv__player__columns:\n        player[prop] = player_tokens[i]\n        i = i + 1\n    p = _get_or_create_player(player)\n    for prop in player:\n        setattr(p, prop, player[prop])\n    offset = 6\n    p.note = [_parse_note(token) for token in player_tokens[offset:offset + len(_days)]]\n\n\n__player_id_properties__ = ['poste', 'nom', 'team']\n\n \ndef _are_same_player(player, other):\n    for prop in __player_id_properties__:\n        if other[prop] != getattr(player, prop):\n            return False\n    return True\n \n \ndef _get_or_create_player(player):\n    for p in _players:\n        if _are_same_player(p, player):\n            return p\n    p = Player()\n    for prop in __player_id_properties__:\n        setattr(p, prop, player[prop])\n    _players.append(p)\n    return p\n    \n\ndef _update_teams(teams_csv):\n    \"update teams from the csv lines, skipping the header\"\n    lines = 
teams_csv.split(\"\\n\")\n    for line in lines[1:]:\n        if line is '':\n            continue\n        _update_team(line.split(','))\n    _update_days()\n\n\ndef _update_days():\n    if len(_teams) == 0:\n        return\n    days = _teams[0].days\n    if len(days) == 0:\n        return\n    if len(days) > len(_days):\n        for d in range(len(_days), len(days)):\n            _days.append(dayheader(days[d]['day'], False))\n    #current last day always have goals\n    #_days[_current_day-1].with_goals = True\n\n\ndef _update_days_from_header_line(line):\n    offset = len(__csv__player__columns)\n    tokens = line.split(',')[offset:]\n    days = [_parse_header_day(token) for token in tokens]\n    for i in range(len(days)):\n        if i >= len(_days):\n            _days.append(days[i])\n            continue\n        _days[i].with_goals = days[i].with_goals\n    \n\ndef _parse_header_day(token):\n    d = DayHeader()\n    if \"*\" in token:\n        d.with_goals = True\n        token = token[:-1]\n    d.day = token\n    return d\n\n\ndef _dump_day_header(day):\n    return day.day + (\"*\" if day.with_goals else \"\")\n\n\ndef _get_properties(tokens, properties, columns):\n    indexes = [ columns.index(prop) for prop in properties ]\n    return [ tokens[i] for i in indexes ]\n\n\ndef _get_team_properties(tokens, properties):\n    \"get team properties value from csv lines token\"\n    return _get_properties(tokens, properties, __csv__team__columns)\n\n\ndef _get_or_create_team(sheet, name, short_name):\n    \"get or create the team\"\n    for t in _teams:\n        if t.short_name == short_name:\n            return t\n    t = team(sheet, name, short_name)\n    _teams.append(t)\n    return t\n\n\ndef _update_team_days(team, days = []):\n    if len(days) > len(team.days):\n        team.days = days\n\n\ndef _update_team(team_tokens):\n    \"update the team from the line\"\n    team_properties = _get_team_properties(team_tokens, ['sheet', 'name', 'short_name'])\n    t = _get_or_create_team(*team_properties)\n    new_days = [_parse_day(d) for d in team_tokens[3:]]\n    _update_team_days(t, new_days)\n\n\n \ndef _update_from_csv(csv):\n    team_regex = re.compile(_team_regex, re.M)\n    team_regex.sub(_team_replacer, csv)\n    lines = _get_lines(csv)\n    _set_current_day(lines)\n    for p in _players:\n        p._updated = False\n    for line in lines:\n        _parse_line(line)\n    #removes players that have been removed\n    _players[:] = [p for p in _players if p._updated]\n\n    \ndef _team_replacer(match):\n    sheet = int(match.group(1))\n    if sheet == 1:\n        return\n    t = _get_or_create_team(str(sheet), match.group(3), match.group(2))\n\n    \ndef _first_player_header_line(lines):\n    for line in lines:\n        if _is_player_header_line(line):\n            return line\n\n\ndef _set_current_day(lines):\n    global _current_day\n    line = _first_player_header_line(lines)\n    days = _extract_opposition(line)\n    _current_day = len(days)\n\n\ndef dump():\n    \"\"\"dump both players and teams\"\"\"\n    return dump_teams(), dump_players()\n\n    \ndef dump_players():\n    \"dump players as csv\"\n    columns = __csv__player__columns\n    if len(_teams) > 0:\n        columns = columns + [_dump_day_header(day) for day in _days]\n    res = \",\".join(columns) + \"\\n\"\n    return res + \"\\n\".join([_dump_player(player) for player in _players])\n\n\ndef dump_teams():\n    \"dump teams as csv\"\n    columns = __csv__team__columns\n    if len(_teams) > 0:\n        columns = columns + [day['day'] for day in _teams[0].days]\n    header = \",\".join(columns)\n    return header + \"\\n\" + \"\\n\".join([_dump_team(team) for team in _teams])\n\n\ndef _init():\n    global _teams\n    global _players\n    global _current_day\n    _teams = []\n    _players = []\n    del _days[:]\n    _current_day = 0\n    \n\ndef _get_lines(csv):\n    lines = csv.split(\"\\n\")\n    
return lines[1:]\n\n\ndef _update_current_team(line):\n    team_header_pattern = re.compile(r\"-{8}\")\n    name_pattern = re.compile(r'[A-Za-z]+')\n    if team_header_pattern.match(line):\n        _set_current_team(name_pattern.search(line).group())\n\n\ndef _is_player_header_line(line):\n    return line.startswith(\"Poste\")\n    \n\ndef _parse_line(line):\n    _update_current_team(line)\n    if _is_player_header_line(line):\n        days = _extract_opposition(line)\n        _current_team_set_days(days)\n        _update_days()\n        _days[_current_day-1].with_goals = True\n        return\n    #skip all non notation lines\n    if not re.match(r'^[GDMA],', line):\n        return\n    player = _extract_player(line)\n    p = _get_or_create_player(player.__dict__)\n    notescount = max(len(p.note), len(player.note))\n    for i in range(notescount):\n        if i + 1 > len(p.note):\n            p.note.append(player.note[i])\n    p.note[_current_day - 1] = player.note[_current_day - 1]\n    p._updated = True\n\ndef _dump_player(player):\n    \"dump a player as a formatted csv row\"\n    dump = [ getattr(player,c) for c in __csv__player__columns]\n    for note in player.note:\n        dump.append(_dump_note(note))\n    return \",\".join(dump)\n\n\ndef _dump_team(team):\n    \"csv dump team\"\n    dump = [getattr(team, prop) for prop in __csv__team__columns]\n    for day in team.days:\n        dump.append(_dump_day(day))\n    return \",\".join(dump)\n\n\ndef _dump_day(day):\n    return day['day'] + \" (\" + day['location'] + \"): \" + day['opponentTeam']\n\n\ndef _extract_player(line):\n    \"extract a player from an mpg csv line\"\n    split = line.split(',')\n    p = player(**{\n        'poste':split[0],\n        'nom': split[1],\n        'tit': split[2],\n        'entrees': split[3],\n        'buts': split[4],\n        'team': _current_team,\n        'note': _extract_notation(split[6:])\n    })\n    today_goals = _parse_note(\":\" + p.buts)\n    today_note = p.note[_current_day - 1]\n    _set_goals_from(today_note, today_goals)\n    return p\n\n\ndef _set_goals_from(note, other):\n    for prop in ['goals_pos', 'goals_neg']:\n        setattr(note, prop, getattr(other, prop))\n\n\ndef _extract_notation(notes_str):\n    \"extract notation from an array of notes\"\n    notes = []\n    for note_str in notes_str[:len(_days)]:\n        notes.append(_parse_note(note_str))\n    return notes\n\n\ndef _current_team_set_days(days):\n    for team in _teams:\n        if team.short_name != _current_team:\n            continue\n        _update_team_days(team, days)\n\n\ndef _dump_goals(note):\n    goals = []\n    for g in [note.goals_pos, note.goals_neg]:\n        if g is None:\n            continue\n        if g < 0:\n            g = \"(\" + str(g) + \")\"\n        else:\n            g = str(g)\n        goals.append(g)\n    if len(goals) == 0:\n        return \"\"\n    return \"/\".join(goals)\n\n\ndef _dump_note(note):\n    res = \"\"\n    if note.note is not None:\n        res += str(note.note)\n    elif note.entered:\n        res += _entered_string\n    elif note.injured:\n        res += _injured_string\n    goals = _dump_goals(note)\n    if goals != \"\":\n        res += \":\" + goals\n    return res\n\n\ndef _parse_note(note_str):\n    \"\"\"\n    parse note such as 2, 2:4/, 2:(-1)/4, '<', 'Bl.' 
and so on.\n    returns a Note object\n    \"\"\"\n    note_tokens = [s.strip() for s in re.split(r'[\\(\\)\\/:]', note_str)]\n    token_note = note_tokens[0]\n    note = Note()\n    try:\n        note.note = int(token_note)\n        note.entered = True\n    except ValueError:\n        note.note = None\n        if token_note == _entered_string:\n            note.entered = True\n        elif token_note == _injured_string:\n            note.injured = True\n    for g in note_tokens[1:]:\n        if g == \"\":\n            continue\n        try:\n            g = int(g)\n        except ValueError:\n            continue\n        if g > 0:\n            note.goals_pos = g\n        else:\n            note.goals_neg = g\n    return note\n\n\ndef _extract_opposition(line):\n    \"extract days{'day', 'location', 'opponentTeam'} from a csv line\"\n    cells = line.split(',')\n    days = []\n    day_regex = re.compile(_day_regex)\n    for cell in cells:\n        if not day_regex.match(cell):\n            continue\n        d = _parse_day(cell)\n        days.append(d)\n    return days\n\n\ndef _parse_day(day_mpg):\n    tokens = re.split(\"[:\\ \\(\\)]\", day_mpg)\n    tokens = list(filter(None, tokens))\n    return {\n        'day':tokens[0],\n        'location':tokens[1],\n        'opponentTeam':tokens[2]\n    }\n\n\ndef _set_current_team(team):\n    global _current_team\n    _current_team = team\n    \n\ndef _read_file(filename):\n    if not os.path.exists(filename):  #'w+' truncated the stored data before reading\n        return \"\"\n    with open(filename) as file:\n        return file.read()\n\n\ndef _write_file(filename, content):\n    with open(filename, 'w') as file:\n        file.seek(0)\n        file.write(content)\n        file.truncate()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"directory\", help = \"the storage dir for the data\")\n    parser.add_argument(\"input\", help = \"the input file, accepts xlsx\")\n    args = parser.parse_args()\n    dir = args.directory\n    input = args.input\n    if not os.path.isdir(dir):\n        os.makedirs(dir)\n    players_filepath = os.path.join(dir, _players_csv_file_)\n    teams_filepath = os.path.join(dir, _teams_csv_file_)\n    players = _read_file(players_filepath)\n    teams = _read_file(teams_filepath)\n    update(players = players, teams = teams)\n    update_xlsx(input)\n    _write_file(players_filepath, dump_players())\n    _write_file(teams_filepath, dump_teams())\n", "sub_path": "pystatsmpg/store.py", "file_name": "store.py", "file_ext": "py", "file_size_in_byte": 14429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 73, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 75, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 76, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 322, "usage_type": "call"}, {"api_name": "re.M", "line_number": 322, "usage_type": "attribute"}, {"api_name": "csv.split", "line_number": 388, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 393, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 394, "usage_type": "call"}, {"api_name": "re.match", "line_number": 412, "usage_type": "call"}, {"api_name": "re.split", "line_number": 515, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 545, "usage_type": "call"}, {"api_name": "re.split", "line_number": 555, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 584, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 590, "usage_type": "call"}, {"api_name": "os.path", "line_number": 590, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 591, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 592, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 592, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 593, "usage_type": "call"}, {"api_name": "os.path", "line_number": 593, "usage_type": "attribute"}]} +{"seq_id": "297054425", "text": "from django.db import models\nfrom django.utils import timezone\n\nTRIMESTER_CHOICES = (\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n)\n\nGRADE_LEVEL_CHOICES = (\n ('K', 'K'),\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n ('4', '4'),\n ('5', '5'),\n ('6', '6'),\n ('7', '7'),\n ('8', '8'),\n)\n\nSUBJECT_CHOICES = (\n ('ELA', 'ELA'),\n ('Math', 'Math'),\n)\n\nclass Teacher(models.Model):\n user = models.OneToOneField('auth.User', on_delete=models.CASCADE, primary_key=True)\n grade = models.CharField(max_length=2, choices=GRADE_LEVEL_CHOICES)\n subject = models.CharField(max_length=10, choices=SUBJECT_CHOICES)\n first_login = models.BooleanField(default=True, editable=False)\n\n def __str__(self):\n return self.user.first_name + \" \" + self.user.last_name + \" (\" + self.user.username + \")\"\n\n# Units have a title, description, and a start and end date. They can be covered.\nclass Unit(models.Model):\n author = models.ForeignKey('auth.User')\n teacher = models.ForeignKey('Teacher')\n title = models.CharField(max_length=200) \n trimester = models.CharField(max_length=2, choices=TRIMESTER_CHOICES, default='1', help_text=\"Trimester 1: Aug. 23 to Nov. 22 <br />Trimester 2: Nov. 28 to Mar. 16 <br />Trimester 3: Mar. 20 to Jun. 7\")\n start_date = models.DateField()\n end_date = models.DateField()\n theme = models.TextField(max_length=400, null=True, blank=True, help_text=\"What are the key themes, concepts and understandings that link together the individual units of the year in your curriculum?\")\n pillar = models.TextField(max_length=400, null=True, blank=True, help_text=\"Where do you see Caliber’s core pillars in the curriculum, or how will you use the curriculum to develop experiences that meet and support these pillars for your students? 
- Smart (College Ready), Heart (Emotional Intelligence), Act (Agent of Change), Think (Critical Thinker)\")\n    essential_question = models.TextField(max_length=400, null=True, blank=True, help_text=\"Essential questions are overarching questions that your students will be able to uncover over the unit - they should be questions that result in a conclusion drawn by the student, not recited facts.\")\n    covered = models.BooleanField(help_text=\"Check when you have completed the unit\")\n    share_with_all = models.BooleanField(editable=False, default=False, help_text=\"Check this to share the unit out with all Caliber teachers (Team leads and admin only)\")\n    share_with_content = models.BooleanField(default=False, help_text=\"Check this to share out with those teaching the same content\")\n    date_shared = models.DateField(null=True, blank=True, help_text=\"This is the date teachers see when the unit has been shared out to them.\")\n    text = models.TextField(max_length=400, null=True, blank=True, help_text=\"Comments (optional)\")\n    show = models.BooleanField(default=False)\n\n    def __str__(self):\n        return self.title + \" (\" + self.author.username + \")\"\n\n# Standard grouping\nclass Claim(models.Model):\n    grade = models.CharField(max_length=2, choices=GRADE_LEVEL_CHOICES, default='3') \n    subject = models.CharField(max_length=10, choices=SUBJECT_CHOICES, default='ELA')\n    name = models.CharField(max_length=200)\n\n    def __str__(self):\n        return self.name + \" (\" + self.grade + \" - \" + self.subject + \")\"\n\nclass Indicator(models.Model):\n    claim = models.ForeignKey('Claim') \n    name = models.CharField(max_length=200)\n\n    def __str__(self):\n        return self.name + \" (\" + self.claim.name + \" - \" + self.claim.grade + \" - \" + self.claim.subject + \")\"\n\n# Standards can be found by their grade level. They are attached to units. They can be covered.\nclass Standard(models.Model):\n    grade = models.CharField(max_length=2, choices=GRADE_LEVEL_CHOICES, default='3')\n    subject = models.CharField(max_length=10, choices=SUBJECT_CHOICES, default='Math')\n    claim = models.ForeignKey('Claim', on_delete=models.PROTECT, default=50)\n    indicator = models.ForeignKey('Indicator', on_delete=models.PROTECT, default=100)\n    name = models.CharField(max_length=200)\n    text = models.TextField()\n    unit = models.ForeignKey('Unit', on_delete=models.SET(1), default=\"1\") #1 == primary key of \"Unassigned\" unit\n    trimester_1 = models.BooleanField(default=False)\n    trimester_2 = models.BooleanField(default=False)\n    trimester_3 = models.BooleanField(default=False)\n\n    def __str__(self):\n        return self.claim.name + \" - \" + self.name + \" - \" + self.text\n\n# Additional grade level standards\nclass ExtraStandard(models.Model):\n    standard = models.ForeignKey('Standard')\n    unit = models.ForeignKey('Unit')\n\n    def __str__(self):\n        return self.standard.name + \" --> \" + self.unit.title\n\nclass RemedialStandard(models.Model):\n    standard = models.ForeignKey('Standard', related_name='%(class)s_grade_standard')\n    remedial = models.ForeignKey('Standard', related_name='%(class)s_remedial_standard')\n    add_to_your_plan = models.BooleanField()\n\n    # def __str__(self):\n    #     return self.pk\n\nclass StretchStandard(models.Model):\n    standard = models.ForeignKey('Standard', related_name='%(class)s_grade_standard')\n    stretch = models.ForeignKey('Standard', related_name='%(class)s_stretch_standard')\n    add_to_your_plan = models.BooleanField()\n\n    # def __str__(self):\n    #     return self.stretch.name\n\n# Resources are attached to standards. 
They are grouped by content (lesson plans, materials).\n# They are links to sites. They are attached to users (determines editablility)\nclass Resource(models.Model):\n author = models.ForeignKey('auth.User')\n standard = models.ForeignKey('Standard', on_delete=models.PROTECT, help_text=\"How can users find your resource on the 'Resources' page?\")\n\n CONTENT_CHOICES = (\n ('LP', 'Lesson Plan'),\n ('IP', 'Independent Practice'),\n ('WC', 'Whole Class'),\n ('SG', 'Small Group'),\n ('M', 'Multimedia'),\n ('GO', 'Graphic Organizers'),\n ('O', 'Other'),\n )\n content = models.CharField(max_length=100, choices=CONTENT_CHOICES, blank=True, default=\"\", editable=False)\n name = models.CharField(max_length=100, help_text=\"What is this about?\")\n URL = models.URLField(max_length=300, default=\"http://\", help_text=\"Where is this? (Links to videos, images, or scanned PDFs on Google Drive are all acceptable)\")\n description = models.TextField(max_length=400, help_text=\"What makes this a good resource?\")\n date = models.DateField(null=True, blank=True, default=timezone.now, help_text=\"When was this resource created?\")\n share_with_all = models.BooleanField(editable=False, default=False, help_text=\"Check this to share the unit out with all Caliber teachers (Team leads and admin only)\")\n make_private = models.BooleanField(default=False, help_text=\"Check this to make this resource visible only to you.\")\n\n def __str__(self):\n return self.name + \" (\" + self.standard.name + \")\"\n\nclass AddedResource(models.Model):\n resource = models.ForeignKey('Resource')\n extra_standard = models.ForeignKey('ExtraStandard', default=\"\")\n unit = models.ForeignKey('Unit')\n date = models.DateField(null=True, blank=True, default=timezone.now, help_text=\"Date you plan to use the resource\")\n\n def __str__(self):\n return self.resource.name + \" --> \" + self.unit.title\n", "sub_path": "unitresource/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 7400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.models.Model", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 
40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 76, "usage_type": 
"call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.SET", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 105, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 108, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 115, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 
115, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 116, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 116, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 117, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 117, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 129, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 129, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 130, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 131, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 132, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 132, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 132, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 132, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 134, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 134, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 139, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 139, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 140, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 141, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 141, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 142, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 143, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 143, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "152756116", "text": "#!/usr/bin/python3\n\"\"\"Script that starts a Flask web application\"\"\"\nfrom models import storage\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.teardown_appcontext\ndef close():\n \"\"\"function that call storage.close\"\"\"\n storage.close()\n\n@app.route('/states_list', strict_slashes=False)\ndef body():\n \"\"\"function that call storage.close\"\"\"\n states = storage.all()\n return render_template('7-states_list.html', states=states)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)", "sub_path": "web_flask/7-states_list.py", "file_name": "7-states_list.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": 
[{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "models.storage.close", "line_number": 11, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 11, "usage_type": "name"}, {"api_name": "models.storage.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "253551599", "text": "from django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import Invoice, Product, Customer, InvoiceProduct\nfrom rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST\nimport datetime\n# Create your views here.\n\n@api_view(['POST'])\ndef invoice_create_view(request):\n if request.method == \"POST\":\n data = {}\n invoice_number = request.data.get(\"invoice_number\", \"\")\n customer_id = request.data.get(\"customer_id\", \"\")\n product_list = request.data.get(\"product_list\", [])\n due_date = request.data.get(\"due_date\", \"\")\n total_amount = request.data.get(\"total_amount\", \"\")\n\n try:\n due_date = datetime.datetime.strptime(due_date,\"%Y-%m-%d %H:%M:%S\")\n except Exception as e:\n raise Exception(\"Due date format should be in %Y-%m-%d %H:%M:%S\")\n \n\n #fetch customer details , \n customer = Customer.objects.get(pk=customer_id)\n\n #invoice\n i1= Invoice.objects.create(invoice_number=invoice_number,customer=customer,total_amount=total_amount, due_date=due_date, is_manual=True)\n i1.save()\n\n for product in product_list:\n p1 = Product.objects.get(id=product['id'])\n quantity = product['quantity']\n ip = InvoiceProduct(invoice=i1, product=p1,product_quantity=quantity)\n ip.save()\n\n return Response({\"Success\": True}, status=HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef invoice_get_view(request):\n try:\n invoice_number = request.data.get(\"invoice_number\", \"\")\n i1 = Invoice.objects.filter(invoice_number=invoice_number)\n if i1:\n if i1[0].is_manual or i1[0].is_digitized:\n data = {}\n data['invoice_number'] = i1[0].invoice_number\n data['customer_id'] = i1[0].customer_id\n data['due_date'] = i1[0].due_date\n data['total_amount'] = i1[0].total_amount\n data['product_list'] = []\n ip1 = InvoiceProduct.objects.filter(invoice=i1[0]).select_related('product')\n for invoice_product in ip1:\n data['product_list'].append({'id': invoice_product.product.id, 'quantity': invoice_product.product_quantity})\n return Response({\"Success\": True, \"data\": data}, status=HTTP_200_OK)\n else:\n return Response({\"Success\": True, \"data\": {}}, status=HTTP_200_OK)\n else:\n return Response({\"Success\": True, \"data\": {}}, status=HTTP_200_OK)\n except Exception as e:\n raise e\n\n@api_view(['POST'])\ndef invoice_update_view(request):\n if request.method == \"POST\":\n data = {}\n invoice_number = request.data.get(\"invoice_number\", None)\n data['customer_id'] = request.data.get(\"customer_id\", None)\n product_list = request.data.get(\"product_list\", None)\n data['due_date'] = request.data.get(\"due_date\", None)\n data['total_amount'] = request.data.get(\"total_amount\", None)\n\n data = {k: v for k, v in data.items() if v is not None}\n try:\n invoice_qs = Invoice.objects.filter(invoice_number=invoice_number)\n if invoice_qs:\n if 'customer_id' in data:\n customer = Customer.objects.get(pk=data['customer_id'])\n invoice_qs[0].customer = customer\n invoice_qs[0].save()\n if 'due_date' in data:\n try:\n 
data['due_date'] = datetime.datetime.strptime(data['due_date'],\"%Y-%m-%d %H:%M:%S\")\n except Exception as e:\n raise Exception(\"Due date format should be in %Y-%m-%d %H:%M:%S\")\n\n invoice_qs.update(**data)\n\n if product_list:\n for product in product_list:\n p1 = Product.objects.get(id=product['id'])\n quantity = product['quantity']\n ip = InvoiceProduct(invoice=invoice_qs[0], product=p1,product_quantity=quantity)\n ip.save()\n return Response({\"Success\": True}, status=HTTP_200_OK)\n\n\n else:\n return Response({\"Success\": False, \"Message\": \"No Invoice exist with given invoice number please check\"}, status=HTTP_400_BAD_REQUEST)\n pass\n except Exception as e:\n raise e\n", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Customer.objects.get", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Customer.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.Customer", "line_number": 26, "usage_type": "name"}, {"api_name": "models.Invoice.objects.create", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Invoice.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Invoice", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Product.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 33, "usage_type": "name"}, {"api_name": "models.InvoiceProduct", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Invoice.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Invoice.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Invoice", "line_number": 45, "usage_type": "name"}, {"api_name": "models.InvoiceProduct.objects.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "models.InvoiceProduct.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.InvoiceProduct", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 57, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 57, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 59, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Invoice.objects.filter", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Invoice.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.Invoice", "line_number": 77, 
"usage_type": "name"}, {"api_name": "models.Customer.objects.get", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Customer.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Customer", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "models.Product.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 93, "usage_type": "name"}, {"api_name": "models.InvoiceProduct", "line_number": 95, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 97, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 101, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 101, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "249614122", "text": "import sys\nimport os\nimport os.path as osp\nimport time\nimport datetime\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom default_config import get_default_config, imagedata_kwargs, optimizer_kwargs, lr_scheduler_kwargs\nfrom torchreid.data import ImageDataManager\nfrom torchreid.losses import CrossEntropyLoss\nfrom torchreid.metrics import accuracy, compute_distance_matrix, evaluate_rank\nfrom torchreid.models import build_model\nfrom torchreid.optim import build_lr_scheduler, build_optimizer\n\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger\nfrom torchreid.utils.reidtools import visualize_ranked_results\nfrom torchreid.utils.rerank import re_ranking\nfrom torchreid.utils.tools import check_isfile, set_random_seed\nfrom torchreid.utils.torchtools import save_checkpoint, load_pretrained_weights, open_all_layers,\\\n open_specified_layers, resume_from_checkpoint\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--config-file', type=str, default='', help='path to config file')\n parser.add_argument('--gpu-devices', type=str, default='', )\n parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,\n help='Modify config options using the command-line')\n args = parser.parse_args()\n\n cfg = get_default_config()\n if args.config_file:\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n set_random_seed(cfg.train.seed)\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n log_name = 'test.log' if cfg.test.evaluate else 'train.log'\n log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')\n sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))\n print('Show configuration\\n{}\\n'.format(cfg))\n torch.backends.cudnn.benchmark = True\n\n datamanager = ImageDataManager(**imagedata_kwargs(cfg))\n trainloader, queryloader, galleryloader = datamanager.return_dataloaders()\n print('Building model: {}'.format(cfg.model.name))\n model = build_model(cfg.model.name, datamanager.num_train_pids, 'softmax', pretrained=cfg.model.pretrained)\n\n if cfg.model.load_weights and check_isfile(cfg.model.load_weights):\n 
load_pretrained_weights(model, cfg.model.load_weights)\n\n model = nn.DataParallel(model).cuda()\n\n criterion = CrossEntropyLoss(datamanager.num_train_pids, label_smooth=cfg.loss.softmax.label_smooth)\n optimizer = build_optimizer(model, **optimizer_kwargs(cfg))\n scheduler = build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))\n\n if cfg.model.resume and check_isfile(cfg.model.resume):\n cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume, model, optimizer=optimizer)\n\n if cfg.test.evaluate:\n distmat = evaluate(model, queryloader, galleryloader, dist_metric=cfg.test.dist_metric,\n normalize_feature=cfg.test.normalize_feature, rerank=cfg.test.rerank, return_distmat=True)\n if cfg.test.visrank:\n visualize_ranked_results(distmat, datamanager.return_testdataset(), 'image', width=cfg.data.width,\n height=cfg.data.height, save_dir=osp.join(cfg.data.save_dir, 'visrank'))\n return\n\n time_start = time.time()\n print('=> Start training')\n for epoch in range(cfg.train.start_epoch, cfg.train.max_epoch):\n train(epoch, cfg.train.max_epoch, model, criterion, optimizer, trainloader,\n fixbase_epoch=cfg.train.fixbase_epoch, open_layers=cfg.train.open_layers)\n scheduler.step()\n if (epoch + 1) % cfg.test.eval_freq == 0 or (epoch + 1) == cfg.train.max_epoch:\n rank1 = evaluate(model, queryloader, galleryloader, dist_metric=cfg.test.dist_metric,\n normalize_feature=cfg.test.normalize_feature, rerank=cfg.test.rerank)\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'epoch': epoch + 1,\n 'rank1': rank1,\n 'optimizer': optimizer.state_dict(),\n }, cfg.data.save_dir)\n elapsed = round(time.time() - time_start)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print('Elapsed {}'.format(elapsed))\n\n\ndef train(epoch, max_epoch, model, criterion, optimizer, trainloader, fixbase_epoch=0, open_layers=None):\n losses = AverageMeter()\n accs = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n model.train()\n if (epoch + 1) <= fixbase_epoch and open_layers is not None:\n print('* Only train {} (epoch: {}/{})'.format(open_layers, epoch + 1, fixbase_epoch))\n open_specified_layers(model, open_layers)\n else:\n open_all_layers(model)\n num_batches = len(trainloader)\n end = time.time()\n for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):\n data_time.update(time.time() - end)\n imgs = imgs.cuda()\n pids = pids.cuda()\n optimizer.zero_grad()\n outputs = model(imgs)\n loss = criterion(outputs, pids)\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n losses.update(loss.item(), pids.size(0))\n accs.update(accuracy(outputs, pids)[0].item())\n if (batch_idx + 1) % 20 == 0:\n eta_seconds = batch_time.avg * (num_batches - (batch_idx + 1) + (max_epoch - (epoch + 1)) * num_batches)\n eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))\n print('Epoch: [{0}/{1}][{2}/{3}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc {acc.val:.2f} ({acc.avg:.2f})\\t'\n 'Lr {lr:.6f}\\t'\n 'eta {eta}'.format(\n epoch + 1, max_epoch, batch_idx + 1, num_batches,\n batch_time=batch_time,\n data_time=data_time,\n loss=losses,\n acc=accs,\n lr=optimizer.param_groups[0]['lr'],\n eta=eta_str))\n end = time.time()\n\n\ndef evaluate(model, queryloader, galleryloader, dist_metric='euclidean', normalize_feature=False, rerank=False,\n return_distmat=False):\n batch_time = AverageMeter()\n model.eval()\n with torch.no_grad():\n 
print('Extracting features from query set ...')\n qf, q_pids, q_camids = [], [], []\n for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):\n imgs = imgs.cuda()\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n q_camids.extend(camids)\n qf = torch.cat(qf, 0)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))\n\n print('Extracting features from gallery set ...')\n gf, g_pids, g_camids = [], [], []\n for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):\n imgs = imgs.cuda()\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n features = features.data.cpu()\n gf.append(features)\n g_pids.extend(pids)\n g_camids.extend(camids)\n gf = torch.cat(gf, 0)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))\n\n print('Speed: {:.4f} sec/batch'.format(batch_time.avg))\n\n if normalize_feature:\n print('Normalizing features with L2 norm ...')\n qf = F.normalize(qf, p=2, dim=1)\n gf = F.normalize(gf, p=2, dim=1)\n\n print('Computing distance matrix with metric={} ...'.format(dist_metric))\n distmat = compute_distance_matrix(qf, gf, dist_metric)\n distmat = distmat.numpy()\n\n if rerank:\n print('Applying person re-ranking ...')\n distmat_qq = compute_distance_matrix(qf, qf, dist_metric)\n distmat_gg = compute_distance_matrix(gf, gf, dist_metric)\n distmat = re_ranking(distmat, distmat_qq, distmat_gg)\n\n print('Computing CMC and mAP ...')\n cmc, mAP = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids)\n print('** Results **')\n print('mAP: {:.1%}'.format(mAP))\n print('CMC curve')\n for r in [1, 5, 10, 20]:\n print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))\n\n if return_distmat:\n return distmat\n\n return cmc[0]\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "train_soft.py", "file_name": "train_soft.py", "file_ext": "py", "file_size_in_byte": 8786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 29, "usage_type": "attribute"}, {"api_name": "argparse.REMAINDER", "line_number": 32, "usage_type": "attribute"}, {"api_name": "default_config.get_default_config", "line_number": 36, "usage_type": "call"}, {"api_name": "torchreid.utils.tools.set_random_seed", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torchreid.utils.loggers.Logger", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.backends", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torchreid.data.ImageDataManager", "line_number": 48, "usage_type": "call"}, {"api_name": "default_config.imagedata_kwargs", "line_number": 48, "usage_type": "call"}, {"api_name": "torchreid.models.build_model", "line_number": 51, "usage_type": "call"}, {"api_name": "torchreid.utils.tools.check_isfile", "line_number": 53,
"usage_type": "call"}, {"api_name": "torchreid.utils.torchtools.load_pretrained_weights", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torchreid.losses.CrossEntropyLoss", "line_number": 58, "usage_type": "call"}, {"api_name": "torchreid.optim.build_optimizer", "line_number": 59, "usage_type": "call"}, {"api_name": "default_config.optimizer_kwargs", "line_number": 59, "usage_type": "call"}, {"api_name": "torchreid.optim.build_lr_scheduler", "line_number": 60, "usage_type": "call"}, {"api_name": "default_config.lr_scheduler_kwargs", "line_number": 60, "usage_type": "call"}, {"api_name": "torchreid.utils.tools.check_isfile", "line_number": 62, "usage_type": "call"}, {"api_name": "torchreid.utils.torchtools.resume_from_checkpoint", "line_number": 63, "usage_type": "call"}, {"api_name": "torchreid.utils.reidtools.visualize_ranked_results", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "name"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "torchreid.utils.torchtools.save_checkpoint", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": "torchreid.utils.avgmeter.AverageMeter", "line_number": 94, "usage_type": "call"}, {"api_name": "torchreid.utils.avgmeter.AverageMeter", "line_number": 95, "usage_type": "call"}, {"api_name": "torchreid.utils.avgmeter.AverageMeter", "line_number": 96, "usage_type": "call"}, {"api_name": "torchreid.utils.avgmeter.AverageMeter", "line_number": 97, "usage_type": "call"}, {"api_name": "torchreid.utils.torchtools.open_specified_layers", "line_number": 101, "usage_type": "call"}, {"api_name": "torchreid.utils.torchtools.open_all_layers", "line_number": 103, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 116, "usage_type": "call"}, {"api_name": "torchreid.metrics.accuracy", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 121, "usage_type": "call"}, {"api_name": "time.time", "line_number": 136, "usage_type": "call"}, {"api_name": "torchreid.utils.avgmeter.AverageMeter", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 143, "usage_type": "call"}, {"api_name": "time.time", "line_number": 148, "usage_type": "call"}, {"api_name": "time.time", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 157, "usage_type": "call"}, {"api_name": "time.time", "line_number": 164, "usage_type": "call"}, {"api_name": "time.time", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 180, 
"usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 181, "usage_type": "name"}, {"api_name": "torchreid.metrics.compute_distance_matrix", "line_number": 184, "usage_type": "call"}, {"api_name": "torchreid.metrics.compute_distance_matrix", "line_number": 189, "usage_type": "call"}, {"api_name": "torchreid.metrics.compute_distance_matrix", "line_number": 190, "usage_type": "call"}, {"api_name": "torchreid.utils.rerank.re_ranking", "line_number": 191, "usage_type": "call"}, {"api_name": "torchreid.metrics.evaluate_rank", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "431842029", "text": "# coding:utf-8\r\n\r\nimport datetime\r\nimport pandas as pd\r\nimport pyspark.sql.functions as f\r\nfrom pyspark.context import SparkContext\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql import Window\r\nfrom pyspark.sql import types as t\r\nfrom pyspark.sql.types import DoubleType\r\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial.distance import cdist\r\nfrom sklearn.metrics import silhouette_score\r\nfrom pyspark.ml.clustering import KMeans\r\nfrom pyspark.ml.feature import MinMaxScaler, VectorAssembler\r\n\r\n\r\ndef load_data():\r\n\r\n ods_mk_xcx_log_action = spark.sql(\r\n 'select * from bgy_data_platform.ods_mk_xcx_log_action') \\\r\n .withColumn('date', f.to_date(f.col('channelid'))) \\\r\n .withColumn('channelid', f.to_timestamp(f.col('channelid'))) \\\r\n .cache()\r\n ods_mk_xcx_arean = spark.sql(\r\n 'select * from bgy_data_platform.ods_mk_xcx_arean').cache()\r\n ods_mk_xcx_card_businesscard_c = spark.sql(\r\n 'select * from bgy_data_platform.ods_mk_xcx_card_businesscard_c').cache()\r\n\r\n return ods_mk_xcx_log_action, ods_mk_xcx_arean, ods_mk_xcx_card_businesscard_c\r\n\r\n\r\ndef get_time_range(df, win_long=14, win_step=5, win_num=10):\r\n '''\r\n 得到训练集的时间范围,\r\n win_long:统计窗口长度\r\n win_step:滑动窗口距离\r\n win_num:滑动窗口的个数\r\n '''\r\n\r\n num_days = win_long + win_step*(win_num-1)\r\n max_date = df \\\r\n .select(f.max(f.col('date')).alias('max_date')) \\\r\n .collect()[0]['max_date']\r\n min_date = max_date - datetime.timedelta(num_days)\r\n\r\n# for i in range(win_num):\r\n# start_time = min_date + datetime.timedelta(i * win_step)\r\n# ent_time = min_date + datetime.timedelta(i * win_step + win_step)\r\n# print('Window {}:'.format(i), start_time, '->', ent_time)\r\n\r\n print('\\nmin_date:{}'.format(min_date))\r\n print('max_date:{}'.format(max_date))\r\n\r\n return min_date, max_date\r\n\r\n\r\ndef Quartile_anomaly(df, cols):\r\n\r\n print('\\n')\r\n for col in cols:\r\n df = df.withColumn(col, df[col].cast(DoubleType()))\r\n quantiles = df.approxQuantile(col, [0.25, 0.75], 0)\r\n IQR = quantiles[1] - quantiles[0]\r\n max_value = quantiles[1] + 1.5 * IQR\r\n print(\"-\" * 20, col, \"0.25_quantiles and 0.75_quantiles:\",\r\n quantiles, \"Abnormal threshold\", max_value)\r\n\r\n df = df.withColumn(col, f.when(f.col(col) > max_value, max_value)\r\n .otherwise(f.col(col)))\r\n\r\n return df\r\n\r\n\r\ndef Quartitle_095(df, cols):\r\n\r\n for col in cols:\r\n df = df.withColumn(col, df[col].cast(DoubleType()))\r\n quantiles = df.approxQuantile(col, [0.95], 0)\r\n value_095 = quantiles[0]\r\n print(\"-\" * 20, col, \"0.95_quantiles:\", value_095)\r\n\r\n df = df.withColumn(col, f.when(f.col(col) > value_095, value_095)\r\n 
.otherwise(f.col(col)))\r\n\r\n return df\r\n\r\n\r\ndef filter_staff(df_log_action, df_card_businesscard):\r\n '''\r\n Drop users who have already created a business card, treating them as sales consultants\r\n '''\r\n #\r\n df_staff_id = df_card_businesscard\\\r\n .select(f.col('cstid')).distinct()\r\n\r\n #\r\n df_filter_staff = df_log_action\\\r\n .join(df_staff_id, 'cstid', how='left_anti')\r\n\r\n #\r\n num_cstid = df_log_action\\\r\n .select(f.countDistinct(f.col('cstid')).alias('num'))\\\r\n .collect()[0]['num']\r\n num_staff = df_staff_id.count()\r\n num_after_filter_staff = df_filter_staff\\\r\n .select(f.countDistinct(f.col('cstid')).alias('num')) \\\r\n .collect()[0]['num']\r\n num_click = df_log_action.count()\r\n num_click_filter_staff = df_filter_staff.count()\r\n\r\n print('the num of cstid is {:,}'.format(num_cstid))\r\n print('the num of staff is {:,}'.format(num_staff))\r\n print('the num of cstid after filtering staff is {:,}'.format(\r\n num_after_filter_staff))\r\n\r\n print('\\nthe num of clicks is {:,}'.format(num_click))\r\n print('the num of clicks after filtering staff is {:,}'.format(\r\n num_click_filter_staff))\r\n\r\n return df_filter_staff\r\n\r\n\r\ndef filter_action(df):\r\n '''\r\n Keep only the actions that contribute to the intention score\r\n '''\r\n #\r\n num_before_filter = df.count()\r\n df = df.filter(\r\n (f.col('code') == 179) |\r\n (f.col('code') == 305) |\r\n (f.col('code') == 319) |\r\n (f.col('code') == 327) |\r\n (f.col('code') == 342) |\r\n (f.col('code') == 350) |\r\n (f.col('code') == 351) |\r\n (f.col('code') == 364) |\r\n (f.col('code') == 374) |\r\n (f.col('code') == 385) |\r\n (f.col('code').between(392, 419)) |\r\n (f.col('code') == 439) |\r\n (f.col('code') == 440)\r\n )\r\n num_after_filter = df.count()\r\n\r\n print('\\nnum_before_filter is {:,}'.format(num_before_filter))\r\n print('num_after_filter is {:,}'.format(num_after_filter))\r\n\r\n return df\r\n\r\n\r\ndef weight_action(df):\r\n '''\r\n Assign a different intention weight to each action type\r\n '''\r\n\r\n @pandas_udf('double', PandasUDFType.SCALAR)\r\n def weight_action_func(pd_series):\r\n pd_series = pd_series.map(weight.value)\r\n pd_series.fillna(0.1, inplace=True)\r\n\r\n return pd_series\r\n\r\n weight = {\r\n 179: 0.5, # digital materials\r\n 305: 1, # project portal - share\r\n 319: 2, # theme page visit\r\n 327: 4, # group-buying campaign\r\n 342: 1, # nine-grid share\r\n 350: 2, # boost campaign\r\n 351: 5, # boost campaign - share\r\n 364: 2, # enter page\r\n 374: 8, # submit form\r\n 385: 1.5, # article detail visit\r\n 393: 3, # visit property detail page\r\n 394: 1.5, # carousel component - click carousel image\r\n 395: 0.5, # property brief component - add to favorites\r\n 396: -1, # property brief component - remove from favorites\r\n 397: 1, # property detail component - click address map\r\n 398: 1, # property detail component - follow price changes\r\n 399: 8, # property detail component - follow price changes succeeded\r\n 400: 1, # property detail component - opening reminder\r\n 401: 8, # property detail component - opening reminder succeeded\r\n 402: 2.5, # floor plan component - click floor plan\r\n 403: 2.5, # related news component - click news item\r\n 404: 1.5, # sales service component - click service\r\n 405: 2, # surroundings component - click map\r\n 406: 0.5, # sales consultant component - click business card\r\n 407: 1, # sales consultant component - phone call\r\n 408: 1, # bottom bar component - share with friends\r\n 409: 1, # bottom bar component - generate poster\r\n 410: 0.5, # bottom bar component - sales hotline\r\n 411: 1.5, # bottom bar component - ask a question\r\n 412: 0.5, # carousel component - click see more\r\n 413: 3, # materials component - visit\r\n 414: 0.5, # materials component - click material\r\n 415: 2, # materials component - share\r\n 419: 2.5, # floor plan component - view all\r\n 439: 2, # marketing tool platform\r\n 440: 2, # browse red-packet rain\r\n }\r\n weight = sc.broadcast(weight)\r\n\r\n df = df \\\r\n .withColumn('weight_action', weight_action_func(f.col('code')))\r\n\r\n return df\r\n\r\n\r\ndef effective_visit(df):\r\n '''\r\n Count a visitor's effective visits to a property:\r\n if visitor A opens property a at 10:00 on May 10 and again at 10:30, the 10:30 visit is not counted;\r\n the count only increases when two visits are at least 1 hour apart,\r\n so if visitor B opens property a at 10:00 on May 10 and again at 12:00, the effective visit count is 2\r\n '''\r\n window = Window.partitionBy('cstid', 'areaid').orderBy('channelid')\r\n df = df \\\r\n .withColumn('unix_timestamp', f.unix_timestamp(f.col('channelid'))) \\\r\n .withColumn('shift_unix_timestamp',
f.lag(f.col('unix_timestamp')).over(window)) \\\r\n .withColumn('effective_visit', f.when((f.col('unix_timestamp') - f.col('shift_unix_timestamp')) > 3600, 1).otherwise(0)) \\\r\n .withColumn('effective_visit', f.when(f.isnull(f.col('shift_unix_timestamp')), 1).otherwise(f.col('effective_visit'))) \\\r\n .withColumn('effective_visit', f.when(f.isnull(f.col('areaid')), 0).otherwise(f.col('effective_visit'))) \\\r\n .drop('unix_timestamp', 'shift_unix_timestamp')\r\n\r\n return df\r\n\r\n\r\ndef time_on_page(df):\r\n '''\r\n Estimate how long a visitor stays on each page\r\n '''\r\n\r\n window = Window.partitionBy('cstid').orderBy('channelid')\r\n df = df \\\r\n .withColumn('unix_timestamp', f.unix_timestamp(f.col('channelid'))) \\\r\n .withColumn('shift_unix_timestamp', f.lag(f.col('unix_timestamp'), -1).over(window)) \\\r\n .withColumn('time_on_page', f.col('shift_unix_timestamp') - f.col('unix_timestamp')) \\\r\n .withColumn('time_on_page', f.when(f.col('time_on_page') > 90, 10).otherwise(f.col('time_on_page'))) \\\r\n .withColumn('time_on_page', f.when(f.isnull(f.col('time_on_page')), 10).otherwise(f.col('time_on_page'))) \\\r\n .drop('unix_timestamp', 'shift_unix_timestamp')\r\n\r\n return df\r\n\r\n\r\ndef feat_eng(df):\r\n\r\n df = weight_action(df)\r\n df = effective_visit(df)\r\n df = time_on_page(df)\r\n\r\n return df\r\n\r\n\r\ndef agg_feat(df):\r\n '''\r\n Generate features for each property project a visitor browsed in the past two weeks.\r\n Output columns:\r\n visitor ID property ID feature1 feature2 feature3 feature4 ...\r\n\r\n All statistics below cover the past two weeks of visit data:\r\n Feature:\r\n - weight_action_sum: sum of the visitor's action weights on the property\r\n - num_of_visits: the visitor's number of effective visits to the property\r\n - total_time: the visitor's total page dwell time on the property\r\n - weight_action_ratio: weight sum on this property / the visitor's weight sum over all properties\r\n - num_of_visits_ratio: effective visits to this property / the visitor's effective visits over all properties\r\n - total_time_ratio: time on this property / the visitor's total visit time\r\n\r\n '''\r\n\r\n #\r\n df_train_agg = df\\\r\n .filter(f.col('areaid') != 'NULL')\\\r\n .groupBy(f.window(\"channelid\", \"14 days\", \"5 days\"), \"cstid\", 'areaid')\\\r\n .agg(\r\n f.sum(f.col('weight_action')).alias('weight_action_sum'),\r\n f.sum(f.col(\"effective_visit\")).alias('num_of_visits'),\r\n f.sum(f.col(\"time_on_page\")).alias(\"total_time\")\r\n )\r\n\r\n #\r\n df_train_agg = df_train_agg\\\r\n .filter(df_train_agg.window.start >= min_date)\\\r\n .filter(df_train_agg.window.end <= max_date)\r\n\r\n #\r\n df_train_agg = Quartile_anomaly(\r\n df_train_agg, cols=['weight_action_sum', 'total_time'])\r\n df_train_agg = Quartitle_095(df_train_agg, cols=['num_of_visits'])\r\n\r\n #\r\n win = Window.partitionBy('window', 'cstid')\r\n df_train_agg = df_train_agg\\\r\n .withColumn('weight_action_ratio', f.col('weight_action_sum') / f.sum('weight_action_sum').over(win)) \\\r\n .withColumn('num_of_visits_ratio', f.col('num_of_visits') / f.sum('num_of_visits').over(win)) \\\r\n .withColumn('total_time_ratio', f.col('total_time') / f.sum('total_time').over(win)) \\\r\n .fillna(1, subset=['num_of_visits_ratio', 'total_time_ratio'])\r\n\r\n return df_train_agg\r\n\r\n\r\ndef normalization(df):\r\n \r\n # assemble the feature columns into a single vector\r\n vector = VectorAssembler(inputCols=[\r\n 'weight_action_sum', 'num_of_visits', 'total_time'], outputCol='features_vect')\r\n vector_df = vector.transform(df)\r\n vector_df.cache()\r\n\r\n # min-max scaling\r\n mmScaler = MinMaxScaler(inputCol='features_vect',\r\n outputCol=\"feature_scaler\")\r\n mmScaler_model = mmScaler.fit(vector_df)\r\n mmScaler_df = mmScaler_model.transform(vector_df)\r\n mmScaler_model.write().overwrite().save(\"./凤凰云/mmScaler_model\") # save the scaler model; it is needed again at prediction time\r\n print('normalization completed!')\r\n\r\n return mmScaler_df\r\n\r\n\r\ndef model(df):\r\n\r\n km = KMeans(
featuresCol=\"feature_scaler\",\r\n predictionCol=\"prediction\",\r\n k=8,\r\n seed=4,\r\n )\r\n km = km.fit(df)\r\n df = km.transform(df)\r\n km.write().overwrite().save(\"./凤凰云/kmeans_model\") # save the clustering model; it is needed again at prediction time\r\n print('model completed!')\r\n \r\n return df, km\r\n\r\n\r\ndef result_processing(df):\r\n\r\n tmp = df.select('weight_action_sum', 'num_of_visits', 'total_time', 'prediction')\\\r\n .groupby('prediction')\\\r\n .agg(f.mean('weight_action_sum').alias('weight_action_sum'), f.mean('num_of_visits').alias('num_of_visits'), f.mean('total_time').alias('total_time'))\\\r\n .toPandas() # convert to pandas for post-processing\r\n \r\n # \r\n tmp[['weight_action_sum', 'num_of_visits', 'total_time']] = tmp[['weight_action_sum', 'num_of_visits', 'total_time']].rank()\r\n tmp['sum'] = tmp['weight_action_sum'] + 0.5 * tmp['num_of_visits'] + tmp['total_time']\r\n tmp['rank'] = tmp['sum'].rank()\r\n high_intention = list(tmp[(tmp['rank'] == 8) | (tmp['rank'] == 7)].index) # high intention\r\n mid_intention = list(tmp[(tmp['rank'] == 5) | (tmp['rank'] == 6)].index) # medium intention\r\n small_intention = list(tmp[(tmp['rank'] == 3) | (tmp['rank'] == 4)].index) # low intention\r\n not_intention = list(tmp[(tmp['rank'] == 1) | (tmp['rank'] == 2)].index) # no intention\r\n d = {}\r\n d.update(zip(high_intention, [4]*2)) # high intention\r\n d.update(zip(mid_intention, [3]*2)) # medium intention\r\n d.update(zip(small_intention, [2]*2)) # low intention\r\n d.update(zip(not_intention, [1]*2)) # no intention\r\n\r\n # \r\n @pandas_udf('double', PandasUDFType.SCALAR)\r\n def intention_map_func(pd_series):\r\n \r\n pd_series = pd_series.map(d.value)\r\n\r\n return pd_series\r\n\r\n d = sc.broadcast(d)\r\n df = df \\\r\n .withColumn('intention', intention_map_func(f.col('prediction')))\r\n\r\n return df\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # \r\n spark = SparkSession \\\r\n .builder \\\r\n .appName(\"intention_client_model\") \\\r\n .enableHiveSupport()\\\r\n .getOrCreate()\r\n sc = spark.sparkContext\r\n sc.setLogLevel(\"ERROR\") # WARN,ERROR,INFO\r\n\r\n # Enable Arrow-based columnar data transfers\r\n spark.conf.set(\"spark.sql.execution.arrow.enabled\", \"true\")\r\n\r\n # load the data\r\n data = load_data()\r\n ods_mk_xcx_log_action = data[0]\r\n ods_mk_xcx_arean = data[1]\r\n ods_mk_xcx_card_businesscard_c = data[2]\r\n\r\n #########################################################\r\n\r\n start_date = ods_mk_xcx_log_action\\\r\n .select(f.max(f.col('channelid')).alias('last_channelid'))\\\r\n .select(f.to_date(f.col('last_channelid')).alias('last_date'))\\\r\n .select(f.date_sub(f.col('last_date'), 60).alias('start_date'))\\\r\n .collect()[0]['start_date']\r\n print('start time:', start_date)\r\n\r\n ods_mk_xcx_log_action = ods_mk_xcx_log_action\\\r\n .filter((f.col('channelid') >= start_date))\\\r\n .coalesce(50)\\\r\n .cache() # each training run only uses the most recent two months of data\r\n\r\n df_train = filter_staff(ods_mk_xcx_log_action,\r\n ods_mk_xcx_card_businesscard_c).cache() # remove staff accounts\r\n df_train = feat_eng(df_train) # generate features\r\n df_train = filter_action(df_train) # keep only the tracked actions of interest\r\n min_date, max_date = get_time_range(df_train) # determine the training time range\r\n df_train_agg = agg_feat(df_train).cache() # one training row = aggregated features for one time window, one userid and one areaid\r\n\r\n #\r\n info_ = df_train_agg.groupBy(\"window\").count()\r\n info_.sort(f.col('window')).show(truncate=False)\r\n print('\\ndf_train_agg_count:{}'.format(df_train_agg.count()))\r\n\r\n #########################################################\r\n\r\n df_train_agg = normalization(df_train_agg) # vectorize and scale\r\n df_train_agg, km = model(df_train_agg) # fit the model\r\n df_train_agg = result_processing(df_train_agg) # 
post-process the results into high / medium / low / no intention labels\r\n df_train_agg\\\r\n .groupby('intention')\\\r\n .count()\\\r\n .sort('intention')\\\r\n .show()", "sub_path": "production_modeling_code.py", "file_name": "production_modeling_code.py", "file_ext": "py", "file_size_in_byte": 15999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pyspark.sql.functions.to_date", "line_number": 24, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 24, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 24, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.to_timestamp", "line_number": 25, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 25, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 25, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.max", "line_number": 45, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 45, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "pyspark.sql.types.DoubleType", "line_number": 64, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.when", "line_number": 71, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 71, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 71, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 72, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 72, "usage_type": "name"}, {"api_name": "pyspark.sql.types.DoubleType", "line_number": 80, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.when", "line_number": 85, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 85, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 85, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 86, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 86, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 97, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 97, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.countDistinct", "line_number": 105, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 105, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 105, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.countDistinct", "line_number": 109, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 109, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 109, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 133, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 133, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 134, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 134, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 135, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 135, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 136, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 136, "usage_type": "name"},
{"api_name": "pyspark.sql.functions.col", "line_number": 137, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 137, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 138, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 138, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 139, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 139, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 140, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 140, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 141, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 141, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 142, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 142, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 143, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 143, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 144, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 144, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 145, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 145, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.pandas_udf", "line_number": 160, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.PandasUDFType.SCALAR", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pyspark.sql.functions.PandasUDFType", "line_number": 160, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 208, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 208, "usage_type": "name"}, {"api_name": "pyspark.sql.Window.partitionBy", "line_number": 220, "usage_type": "call"}, {"api_name": "pyspark.sql.Window", "line_number": 220, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.unix_timestamp", "line_number": 222, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 222, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 222, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.lag", "line_number": 223, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 223, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 223, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.when", "line_number": 224, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 224, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 224, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.when", "line_number": 225, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 225, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.isnull", "line_number": 225, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 225, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.when", "line_number": 226, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 226, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.isnull", "line_number": 226, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 226, 
"usage_type": "call"}, {"api_name": "pyspark.sql.Window.partitionBy", "line_number": 237, "usage_type": "call"}, {"api_name": "pyspark.sql.Window", "line_number": 237, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.unix_timestamp", "line_number": 239, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 239, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 239, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.lag", "line_number": 240, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 240, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 240, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 241, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 241, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.when", "line_number": 242, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 242, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 242, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.when", "line_number": 243, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 243, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.isnull", "line_number": 243, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 243, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 277, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 277, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.window", "line_number": 278, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 278, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.sum", "line_number": 280, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 280, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 280, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.sum", "line_number": 281, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 281, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 281, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.sum", "line_number": 282, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 282, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 282, "usage_type": "call"}, {"api_name": "pyspark.sql.Window.partitionBy", "line_number": 296, "usage_type": "call"}, {"api_name": "pyspark.sql.Window", "line_number": 296, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 298, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 298, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.sum", "line_number": 298, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 299, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 299, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.sum", "line_number": 299, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 300, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 300, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.sum", "line_number": 300, "usage_type": "call"}, {"api_name": "pyspark.ml.feature.VectorAssembler", "line_number": 309, 
"usage_type": "call"}, {"api_name": "pyspark.ml.feature.MinMaxScaler", "line_number": 315, "usage_type": "call"}, {"api_name": "pyspark.ml.clustering.KMeans", "line_number": 327, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.mean", "line_number": 345, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 345, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.pandas_udf", "line_number": 363, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.PandasUDFType.SCALAR", "line_number": 363, "usage_type": "attribute"}, {"api_name": "pyspark.sql.functions.PandasUDFType", "line_number": 363, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 372, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 372, "usage_type": "name"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 380, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 380, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 380, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.max", "line_number": 400, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 400, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 400, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.to_date", "line_number": 401, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 401, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 401, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.date_sub", "line_number": 402, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 402, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 402, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 407, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 407, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 420, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 420, "usage_type": "name"}]} +{"seq_id": "431711710", "text": "# -*- coding: utf-8 -*-\n\"\"\"API models for working with device and user assets.\"\"\"\nimport copy\nimport sys\n\nfrom ...constants import (DEFAULT_PATH, FIELD_JOINER, FIELD_TRIM_LEN,\n FIELD_TRIM_STR, SCHEMAS_CUSTOM)\nfrom ...exceptions import ApiError\nfrom ...tools import (calc_percent, echo_error, echo_ok, echo_warn, get_path,\n join_kv, listify)\n\n\nclass Base:\n \"\"\"Object for handling callbacks for assets.\"\"\"\n\n CB_NAME = \"base\"\n FIND_KEYS = [\"name\", \"name_qual\", \"column_title\", \"name_base\"]\n\n def __init__(\n self, apiobj, store, state=None, fields_map=None, getargs=None,\n ):\n \"\"\"Object for handling callbacks for assets.\"\"\"\n self.LOG = apiobj.LOG.getChild(self.__class__.__name__)\n \"\"\":obj:`logging.Logger`: logger for this object.\"\"\"\n\n self.APIOBJ = apiobj\n \"\"\":obj:`AssetMixin`: assets object.\"\"\"\n\n self.ALL_SCHEMAS = fields_map or apiobj.fields.get()\n \"\"\":obj:`dict`: map of adapter -> field schemas.\"\"\"\n\n self.STATE = state or {}\n \"\"\":obj:`dict`: state dict used by get assets method to track paging.\"\"\"\n\n self.STORE = store or {}\n \"\"\":obj:`dict`: store dict used by get assets method to track arguments.\"\"\"\n\n self.GETARGS = getargs or {}\n \"\"\":obj:`dict`: original kwargs supplied to get assets method.\"\"\"\n\n 
self.RAN = []\n \"\"\":obj:`list` of :obj:`str`: used by callbacks to see if they've run already.\"\"\"\n\n self.TAG_ROWS_ADD = []\n \"\"\":obj:`list` of :obj:`dict`: assets to add tags to in do_tagging.\"\"\"\n\n self.TAG_ROWS_REMOVE = []\n \"\"\":obj:`list` of :obj:`dict`: assets to remove tags from in do_tagging.\"\"\"\n\n self._init()\n\n def _init(self):\n \"\"\"Pass.\"\"\"\n pass\n\n def start(self, **kwargs):\n \"\"\"Run start callbacks.\"\"\"\n join = \"\\n - \"\n self.echo(msg=f\"Starting {self}\")\n\n cbargs = join + join.join(join_kv(obj=self.GETARGS))\n self.LOG.debug(f\"Get Extra Arguments: {cbargs}\")\n\n config = join + join.join(self.args_strs)\n self.echo(msg=f\"Configuration: {config}\")\n\n store = join + join.join(join_kv(obj=self.STORE))\n self.echo(msg=f\"Get Arguments: {store}\")\n\n schemas_pretty = self.APIOBJ.fields._prettify_schemas(\n schemas=self.schemas_selected\n )\n schemas_pretty = join + join.join(schemas_pretty)\n self.echo(msg=f\"Selected Columns: {schemas_pretty}\")\n\n final_columns = join + join.join(self.final_columns)\n self.echo(msg=f\"Final Columns: {final_columns}\")\n\n def stop(self, **kwargs):\n \"\"\"Run stop callbacks.\"\"\"\n self.do_tagging()\n self.echo(msg=f\"Stopping {self}\")\n\n def process_row(self, row):\n \"\"\"Handle callbacks for an asset.\"\"\"\n self.do_pre_row()\n return self.do_row(row=row)\n\n def do_pre_row(self):\n \"\"\"Pass.\"\"\"\n self.STATE.setdefault(\"rows_processed_total\", 0)\n self.STATE[\"rows_processed_total\"] += 1\n self.echo_page_progress()\n\n def do_row(self, row):\n \"\"\"Pass.\"\"\"\n self.process_tags_to_add(row=row)\n self.process_tags_to_remove(row=row)\n self.add_report_adapters_missing(row=row)\n\n for schema in self.schemas_selected:\n self.do_excludes(row=row, schema=schema)\n self.do_add_null_values(row=row, schema=schema)\n self.do_flatten_fields(row=row, schema=schema)\n\n new_rows = self.do_explode_field(row=row)\n for new_row in new_rows:\n self.do_join_values(row=new_row)\n self.do_change_field_titles(row=new_row)\n\n return new_rows\n\n def echo_page_progress(self):\n \"\"\"Asset callback to echo progress per N rows using an echo method.\"\"\"\n page_progress = self.GETARGS.get(\"page_progress\", 10000)\n if not page_progress or not isinstance(page_progress, int):\n return\n\n proc = self.STATE.get(\"rows_processed_total\", 0) or 0\n total = self.STATE.get(\"rows_to_fetch_total\", 0) or 0\n taken = self.STATE.get(\"fetch_seconds_total\", 0) or 0\n page_total = self.STATE.get(\"pages_to_fetch_total\", 0) or 0\n page_num = self.STATE.get(\"page_number\", 0) or 0\n\n if not ((proc % page_progress == 0) or (proc >= total) or (proc <= 1)):\n return\n\n percent = calc_percent(part=proc, whole=total)\n percent = f\"{percent:.2f}%\"\n percent = f\"{percent:>7}\"\n\n total_len = len(str(total))\n rows = f\"[ROWS: {proc:>{total_len}} / {total}]\"\n\n page_total_len = len(str(page_total))\n pages = f\"[PAGES: {page_num:>{page_total_len}} / {page_total}]\"\n\n taken = f\"{taken:.2f} seconds so far\"\n\n self.echo(msg=f\"PROGRESS: {percent} {rows} {pages} in {taken}\")\n\n def do_add_null_values(self, row, schema, key=\"name_qual\"):\n \"\"\"Null out missing fields.\"\"\"\n if not self.GETARGS.get(\"field_null\", False) or self.is_excluded(schema=schema):\n return\n\n null_value = self.GETARGS.get(\"field_null_value\", None)\n field = schema[key]\n\n if schema[\"is_complex\"]:\n row[field] = listify(row.get(field, []))\n\n for item in row[field]:\n for sub_schema in 
self.get_sub_schemas(schema=schema):\n self.do_add_null_values(schema=sub_schema, row=item, key=\"name\")\n else:\n row[field] = row.get(field, null_value)\n\n def do_excludes(self, row, schema):\n \"\"\"Asset callback to remove fields from row.\"\"\"\n if not self.GETARGS.get(\"field_excludes\", []):\n return\n\n if self.is_excluded(schema=schema):\n row.pop(schema[\"name_qual\"], None)\n return\n\n if schema[\"is_complex\"]:\n items = listify(row.get(schema[\"name_qual\"], []))\n for sub_schema in schema[\"sub_fields\"]:\n if self.is_excluded(schema=sub_schema):\n for item in items:\n item.pop(sub_schema[\"name\"], None)\n\n def do_join_values(self, row):\n \"\"\"Join values.\"\"\"\n if not self.GETARGS.get(\"field_join\", False):\n return\n\n joiner = str(self.GETARGS.get(\"field_join_value\", FIELD_JOINER))\n trim_len = self.GETARGS.get(\"field_join_trim\", FIELD_TRIM_LEN)\n trim_str = FIELD_TRIM_STR\n\n for field in row:\n if isinstance(row[field], list):\n row[field] = joiner.join([str(x) for x in row[field]])\n\n if trim_len and isinstance(row[field], str):\n field_len = len(row[field])\n if len(row[field]) >= trim_len:\n msg = trim_str.format(field_len=field_len, trim_len=trim_len)\n row[field] = joiner.join([row[field][:trim_len], msg])\n\n def do_change_field_titles(self, row):\n \"\"\"Asset callback to change qual name to title.\"\"\"\n if not self.GETARGS.get(\"field_titles\", False):\n return\n\n for schema in self.final_schemas:\n row[schema[\"column_title\"]] = row.pop(schema[\"name_qual\"], None)\n\n def do_flatten_fields(self, row, schema):\n \"\"\"Asset callback to flatten complex fields.\"\"\"\n if not self.GETARGS.get(\"field_flatten\", False):\n return\n\n if self.schema_to_explode == schema:\n return\n\n self._do_flatten_fields(row=row, schema=schema)\n\n def _do_flatten_fields(self, row, schema):\n \"\"\"Pass.\"\"\"\n if self.is_excluded(schema=schema):\n return\n\n if not schema[\"is_complex\"]:\n return\n\n null_value = self.GETARGS.get(\"field_null_value\", None)\n\n items = listify(row.pop(schema[\"name_qual\"], []))\n\n for sub_schema in self.get_sub_schemas(schema=schema):\n row[sub_schema[\"name_qual\"]] = []\n\n for item in items:\n value = item.pop(sub_schema[\"name\"], null_value)\n value = value if isinstance(value, list) else [value]\n row[sub_schema[\"name_qual\"]] += value\n\n def do_explode_field(self, row):\n \"\"\"Explode a field into multiple rows.\"\"\"\n explode = self.GETARGS.get(\"field_explode\", \"\")\n null_value = self.GETARGS.get(\"field_null_value\", None)\n\n if not explode:\n return [row]\n\n schema = self.schema_to_explode\n\n if self.is_excluded(schema=schema):\n return [row]\n\n original_row = copy.deepcopy(row)\n\n if schema[\"is_complex\"]:\n new_rows_map = {}\n items = listify(row.pop(schema[\"name_qual\"], []))\n\n for sub_schema in self.get_sub_schemas(schema=schema):\n for idx, item in enumerate(items):\n new_rows_map.setdefault(idx, copy.deepcopy(row))\n value = item.pop(sub_schema[\"name\"], null_value)\n new_rows_map[idx][sub_schema[\"name_qual\"]] = value\n else:\n new_rows_map = {}\n items = listify(row.pop(schema[\"name_qual\"], []))\n\n for idx, item in enumerate(items):\n new_rows_map.setdefault(idx, copy.deepcopy(row))\n new_rows_map[idx][schema[\"name_qual\"]] = item\n\n new_rows = [new_rows_map[idx] for idx in new_rows_map]\n\n if not new_rows:\n self._do_flatten_fields(row=original_row, schema=schema)\n return [original_row]\n\n return new_rows\n\n def do_tagging(self):\n \"\"\"Pass.\"\"\"\n self.do_tag_add()\n 
self.do_tag_remove()\n\n def do_tag_add(self):\n \"\"\"Pass.\"\"\"\n tags_add = listify(self.GETARGS.get(\"tags_add\", []))\n rows_add = self.TAG_ROWS_ADD\n if tags_add and rows_add:\n self.echo(msg=f\"Adding tags {tags_add} to {len(rows_add)} assets\")\n self.APIOBJ.labels.add(rows=rows_add, labels=tags_add)\n\n def do_tag_remove(self):\n \"\"\"Pass.\"\"\"\n tags_remove = listify(self.GETARGS.get(\"tags_remove\", []))\n rows_remove = self.TAG_ROWS_REMOVE\n if tags_remove and rows_remove:\n self.echo(msg=f\"Removing tags {tags_remove} from {len(rows_remove)} assets\")\n self.APIOBJ.labels.remove(rows=rows_remove, labels=tags_remove)\n\n def process_tags_to_add(self, row):\n \"\"\"Pass.\"\"\"\n tags = listify(self.GETARGS.get(\"tags_add\", []))\n if not tags:\n return\n\n tag_row = {\"internal_axon_id\": row[\"internal_axon_id\"]}\n\n if tag_row not in self.TAG_ROWS_ADD:\n self.TAG_ROWS_ADD.append(tag_row)\n\n def process_tags_to_remove(self, row):\n \"\"\"Pass.\"\"\"\n tags = listify(self.GETARGS.get(\"tags_remove\", []))\n if not tags:\n return\n\n tag_row = {\"internal_axon_id\": row[\"internal_axon_id\"]}\n\n if tag_row not in self.TAG_ROWS_REMOVE:\n self.TAG_ROWS_REMOVE.append(tag_row)\n\n def add_report_adapters_missing(self, row):\n \"\"\"Pass.\"\"\"\n if not self.GETARGS.get(\"report_adapters_missing\", False):\n return\n\n schemas = SCHEMAS_CUSTOM[\"report_adapters_missing\"]\n schema = schemas[\"adapters_missing\"]\n\n field_name = schema[\"name_qual\"]\n\n adapters_row = row.get(\"adapters\", [])\n adapter_map = self.adapter_map\n missing = []\n\n for adapter in adapter_map[\"all\"]:\n if adapter in adapters_row:\n continue\n\n if adapter not in adapter_map[\"all_fields\"]:\n continue\n\n if adapter not in missing:\n missing.append(adapter)\n\n row[field_name] = missing\n\n def is_excluded(self, schema):\n \"\"\"Check if a name supplied to field_excludes matches one of GET_SCHEMA_KEYS.\"\"\"\n excludes = listify(self.GETARGS.get(\"field_excludes\", []))\n\n for exclude in excludes:\n for key in self.FIND_KEYS:\n name = schema.get(key, None)\n if (name and exclude) and name == exclude:\n return True\n return False\n\n def open_fd_arg(self):\n \"\"\"Pass.\"\"\"\n self._fd = self.GETARGS[\"export_fd\"]\n self._fd_close = self.GETARGS.get(\"export_fd_close\", False)\n self.echo(msg=f\"Exporting to {self._fd}\")\n return self._fd\n\n def open_fd_path(self):\n \"\"\"Pass.\"\"\"\n self._export_file = self.GETARGS.get(\"export_file\", None)\n self._export_path = self.GETARGS.get(\"export_path\", DEFAULT_PATH)\n self._export_overwrite = self.GETARGS.get(\"export_overwrite\", False)\n\n file_path = get_path(obj=self._export_path)\n file_path.mkdir(mode=0o700, parents=True, exist_ok=True)\n self._file_path = fp = (file_path / self._export_file).resolve()\n\n if self._file_path.exists():\n self._file_mode = \"overwrote\"\n mode = \"overwriting\"\n else:\n self._file_mode = \"created\"\n mode = \"creating\"\n\n if self._file_path.exists() and not self._export_overwrite:\n msg = f\"Export file '{fp}' already exists and overwrite is False!\"\n self.echo(msg=msg, error=ApiError, level=\"error\")\n\n self._file_path.touch(mode=0o600)\n self._fd_close = self.GETARGS.get(\"export_fd_close\", True)\n self._fd = self._file_path.open(mode=\"w\", encoding=\"utf-8\")\n self.echo(msg=f\"Exporting to file '{fp}' ({mode})\")\n return self._fd\n\n def open_fd_stdout(self):\n \"\"\"Pass.\"\"\"\n self._file_path = None\n self._fd_close = False\n self._fd = sys.stdout\n self.echo(msg=\"Exporting to stdout\")\n
return self._fd\n\n def open_fd(self):\n \"\"\"Open a file descriptor.\"\"\"\n if \"export_fd\" in self.GETARGS:\n self.open_fd_arg()\n elif self.GETARGS.get(\"export_file\", None):\n self.open_fd_path()\n else:\n self.open_fd_stdout()\n return self._fd\n\n def close_fd(self):\n \"\"\"Close a file descriptor.\"\"\"\n self._fd.write(\"\\n\")\n if getattr(self, \"_fd_close\", False):\n name = str(getattr(self._fd, \"name\", self._fd))\n self.echo(msg=f\"Finished exporting to {name!r}\")\n self._fd.close()\n\n def echo(\n self,\n msg,\n error=False,\n warning=False,\n level=\"info\",\n level_error=\"error\",\n level_warning=\"warning\",\n abort=True,\n ):\n \"\"\"Pass.\"\"\"\n do_echo = self.GETARGS.get(\"do_echo\", False)\n\n if do_echo:\n if warning:\n echo_warn(msg=msg)\n elif error:\n echo_error(msg=msg, abort=abort)\n else:\n echo_ok(msg=msg)\n return\n\n if warning:\n getattr(self.LOG, level_warning)(msg)\n elif error:\n getattr(self.LOG, level_error)(msg)\n if abort:\n raise error(msg)\n\n getattr(self.LOG, level)(msg)\n\n def get_sub_schemas(self, schema):\n \"\"\"Pass.\"\"\"\n sub_schemas = schema[\"sub_fields\"]\n for sub_schema in sub_schemas:\n if self.is_excluded(schema=sub_schema) or not sub_schema[\"is_root\"]:\n continue\n yield sub_schema\n\n @property\n def custom_schemas(self):\n \"\"\"Pass.\"\"\"\n schemas = []\n if self.GETARGS.get(\"report_adapters_missing\", False):\n schemas += list(SCHEMAS_CUSTOM[\"report_adapters_missing\"].values())\n return schemas\n\n @property\n def final_schemas(self):\n \"\"\"Predict the future schemas that will be returned.\"\"\"\n if hasattr(self, \"_final_schemas\"):\n return self._final_schemas\n\n flat = self.GETARGS.get(\"field_flatten\", False)\n explode_field_name = self.schema_to_explode.get(\"name_qual\", \"\")\n\n final = {}\n\n for schema in self.schemas_selected:\n if self.is_excluded(schema=schema):\n continue\n\n is_explode_field = schema[\"name_qual\"] == explode_field_name\n if schema[\"is_complex\"] and (is_explode_field or flat):\n for sub_schema in self.get_sub_schemas(schema=schema):\n final[sub_schema[\"name_qual\"]] = sub_schema\n else:\n final.setdefault(schema[\"name_qual\"], schema)\n\n self._final_schemas = list(final.values())\n return self._final_schemas\n\n @property\n def final_columns(self):\n \"\"\"Pass.\"\"\"\n if hasattr(self, \"_final_columns\"):\n return self._final_columns\n\n use_titles = self.GETARGS.get(\"field_titles\", False)\n key = \"column_title\" if use_titles else \"name_qual\"\n self._final_columns = [x[key] for x in self.final_schemas]\n\n return self._final_columns\n\n @property\n def fields_selected(self):\n \"\"\"Pass.\"\"\"\n if hasattr(self, \"_fields_selected\"):\n return self._fields_selected\n\n include_details = self.STORE.get(\"include_details\", False)\n\n fields = listify(self.STORE.get(\"fields\", []))\n api_fields = [x for x in self.APIOBJ.FIELDS_API if x not in fields]\n\n if include_details:\n api_fields += [\"meta_data.client_used\", \"unique_adapter_names_details\"]\n\n self._fields_selected = []\n\n for field in api_fields + fields:\n self._fields_selected.append(field)\n if include_details and not field.startswith(\"adapters_data.\"):\n field_details = f\"{field}_details\"\n self._fields_selected.append(field_details)\n\n return self._fields_selected\n\n @property\n def schemas_selected(self):\n \"\"\"Pass.\"\"\"\n if hasattr(self, \"_schemas_selected\"):\n return self._schemas_selected\n\n self._schemas_selected = [] + self.custom_schemas\n\n all_schemas = 
self.ALL_SCHEMAS\n\n if isinstance(self.ALL_SCHEMAS, dict):\n all_schemas = []\n for schemas in self.ALL_SCHEMAS.values():\n all_schemas += schemas\n\n all_schemas_map = {x[\"name_qual\"]: x for x in all_schemas}\n\n for field in self.fields_selected:\n if field in all_schemas_map:\n self._schemas_selected.append(all_schemas_map[field])\n else:\n msg = f\"No schema found for field {field}\"\n self.echo(msg=msg, warning=True)\n\n return self._schemas_selected\n\n @property\n def schema_to_explode(self):\n \"\"\"Pass.\"\"\"\n if hasattr(self, \"_schema_to_explode\"):\n return self._schema_to_explode\n\n explode = self.GETARGS.get(\"field_explode\", \"\")\n\n self._schema_to_explode = {}\n\n if not explode:\n return self._schema_to_explode\n\n valids = []\n\n for schema in self.schemas_selected:\n for key in self.FIND_KEYS:\n name = schema.get(key)\n if name:\n valids.append(name)\n if name == explode:\n self._schema_to_explode = schema\n return self._schema_to_explode\n\n valids = sorted(list(set(valids)))\n msg = f\"Explode field {explode!r} not found, valid fields:{valids}\"\n self.echo(msg=msg, error=ApiError)\n\n @property\n def adapter_map(self):\n \"\"\"Pass.\"\"\"\n self._adapters_meta = getattr(self, \"_adapters_meta\", self.APIOBJ.adapters.get())\n amap = {\n \"has_cnx\": [],\n \"all\": [],\n \"all_fields\": [f\"{x}_adapter\" for x in self.ALL_SCHEMAS],\n }\n\n for adapter in self._adapters_meta:\n name_raw = adapter[\"name_raw\"]\n\n if name_raw not in amap[\"all\"]:\n amap[\"all\"].append(name_raw)\n if adapter[\"cnx\"]:\n amap[\"has_cnx\"].append(name_raw)\n\n amap = {k: list(v) for k, v in amap.items()}\n return amap\n\n @property\n def args_map(self):\n \"\"\"Pass.\"\"\"\n return [\n [\"field_excludes\", \"Exclude fields:\", []],\n [\"field_flatten\", \"Flatten complex fields:\", False],\n [\"field_explode\", \"Explode field:\", None],\n [\"field_titles\", \"Rename fields to titles:\", False],\n [\"field_join\", \"Join field values:\", False],\n [\"field_join_value\", \"Join field values using:\", FIELD_JOINER],\n [\"field_join_trim\", \"Join field character limit:\", FIELD_TRIM_LEN],\n [\"field_null\", \"Add missing fields:\", False],\n [\"field_null_value\", \"Missing field value:\", None],\n [\"tags_add\", \"Add tags:\", []],\n [\"tags_remove\", \"Remove tags:\", []],\n [\"report_adapters_missing\", \"Report Missing Adapters:\", False],\n [\"export_file\", \"Export to file:\", None],\n [\"export_path\", \"Export file to path:\", DEFAULT_PATH],\n [\"export_overwrite\", \"Export overwrite file:\", False],\n [\"export_schema\", \"Export schema:\", False],\n [\"page_progress\", \"Progress per row count:\", 10000],\n [\"json_flat\", \"Produce flat json:\", False],\n ]\n\n @property\n def args_strs(self):\n \"\"\"Pass.\"\"\"\n lines = []\n for arg, text, default in self.args_map:\n value = self.GETARGS.get(arg, default)\n if isinstance(value, str):\n value = repr(value)\n if isinstance(value, list):\n value = \", \".join(value)\n value = value or None\n lines.append(f\"{text:30}{value}\")\n return lines\n\n def __str__(self):\n \"\"\"Show info for this object.\"\"\"\n return f\"{self.CB_NAME.upper()} processor\"\n\n def __repr__(self):\n \"\"\"Show info for this object.\"\"\"\n return self.__str__()\n", "sub_path": "axonius_api_client/api/asset_callbacks/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 21622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tools.join_kv",
"line_number": 61, "usage_type": "call"}, {"api_name": "tools.join_kv", "line_number": 67, "usage_type": "call"}, {"api_name": "tools.calc_percent", "line_number": 128, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 151, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 169, "usage_type": "call"}, {"api_name": "constants.FIELD_JOINER", "line_number": 180, "usage_type": "argument"}, {"api_name": "constants.FIELD_TRIM_LEN", "line_number": 181, "usage_type": "argument"}, {"api_name": "constants.FIELD_TRIM_STR", "line_number": 182, "usage_type": "name"}, {"api_name": "tools.listify", "line_number": 222, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 245, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 249, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 253, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 258, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 261, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 279, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 287, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 295, "usage_type": "call"}, {"api_name": "tools.listify", "line_number": 306, "usage_type": "call"}, {"api_name": "constants.SCHEMAS_CUSTOM", "line_number": 320, "usage_type": "name"}, {"api_name": "tools.listify", "line_number": 343, "usage_type": "call"}, {"api_name": "constants.DEFAULT_PATH", "line_number": 362, "usage_type": "argument"}, {"api_name": "tools.get_path", "line_number": 365, "usage_type": "call"}, {"api_name": "exceptions.ApiError", "line_number": 378, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 390, "usage_type": "attribute"}, {"api_name": "tools.echo_warn", "line_number": 427, "usage_type": "call"}, {"api_name": "tools.echo_error", "line_number": 429, "usage_type": "call"}, {"api_name": "tools.echo_ok", "line_number": 431, "usage_type": "call"}, {"api_name": "constants.SCHEMAS_CUSTOM", "line_number": 456, "usage_type": "name"}, {"api_name": "tools.listify", "line_number": 504, "usage_type": "call"}, {"api_name": "exceptions.ApiError", "line_number": 572, "usage_type": "name"}, {"api_name": "constants.FIELD_JOINER", "line_number": 604, "usage_type": "name"}, {"api_name": "constants.FIELD_TRIM_LEN", "line_number": 605, "usage_type": "name"}, {"api_name": "constants.DEFAULT_PATH", "line_number": 612, "usage_type": "name"}]} +{"seq_id": "500263556", "text": "# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n\n# @file PyUtils.RootUtils\n# @author Sebastien Binet\n# @purpose a few utils to ease the day-to-day work with ROOT\n# @date November 2009\n\nfrom __future__ import with_statement, print_function\n\n__doc__ = \"a few utils to ease the day-to-day work with ROOT\"\n__author__ = \"Sebastien Binet\"\n\n__all__ = [\n 'import_root',\n 'root_compile',\n ]\n\n### imports -------------------------------------------------------------------\nimport os\nimport re\nimport six\n\nfrom .Decorators import memoize\n\n### functions -----------------------------------------------------------------\ndef import_root(batch=True):\n \"\"\"a helper method to wrap the 'import ROOT' statement to prevent ROOT\n from screwing up the display or loading graphics libraries when in batch\n mode (which is the default.)\n\n e.g.\n >>> ROOT = import_root(batch=True)\n >>> f = ROOT.TFile.Open(...)\n \"\"\"\n import ROOT\n ROOT.gROOT.SetBatch(batch)\n if 
batch:\n ROOT.PyConfig.IgnoreCommandLineOptions = True\n import cppyy # noqa: F401\n if os.environ.get('GLIBCXX_USE_CXX11_ABI') == '0':\n cmd = ROOT.gSystem.GetMakeSharedLib()\n if cmd.find('GLIBCXX_USE_CXX11_ABI') < 0:\n cmd = cmd.replace ('$SourceFiles', '$SourceFiles -D_GLIBCXX_USE_CXX11_ABI=0 ')\n ROOT.gSystem.SetMakeSharedLib(cmd)\n return ROOT\n\n_tempfiles = []\n_first_compile = True\ndef root_compile(src=None, fname=None, batch=True):\n \"\"\"a helper method to compile a set of C++ statements (via ``src``) or\n a C++ file (via ``fname``) via ACLiC\n \"\"\"\n if src is not None and fname is not None:\n raise ValueError(\"either 'src' or 'fname' should be given, *not* both\")\n\n if src is None and fname is None:\n raise ValueError(\"one of 'src' or 'fname' must be given, *not* neither\")\n\n # Cling bug workaround: Cling will try to find a definition for the\n # hidden __gmon_start__ by opening all libraries on LD_LIBRARY_PATH.\n # But it will crash if it encounters a separate-debug library.\n # Work around by adding a dummy definition of __gmon_start__.\n # See !31633.\n global _first_compile\n if _first_compile:\n _first_compile = False\n root_compile ('extern \"C\" { void __gmon_start__(){}; }', None, True)\n return _root_compile (src, fname, batch)\n\ndef _root_compile (src, fname, batch):\n import os\n from .Helpers import ShutUp as root_shutup\n \n ROOT = import_root(batch=batch)\n compile_options = \"f\"\n if 'dbg' in os.environ.get('CMTCONFIG', 'opt'):\n compile_options += 'g'\n else:\n compile_options += 'O'\n\n src_file = None\n if src:\n import textwrap\n import tempfile\n src_file = tempfile.NamedTemporaryFile(prefix='root_aclic_',\n suffix='.cxx')\n src_file.write(textwrap.dedent(src).encode())\n src_file.flush()\n src_file.seek(0)\n fname = src_file.name\n\n # Apparently, cling caches files by inode.\n # If you ask it to read a file that has the same inode as one\n # that it has already read, then it will just use the cached\n # contents rather than rereading. 
This, however, doesn't play\n # very well if we're reading temp files, where inodes may be reused,\n # giving rise to hard-to-reproduce failures.\n #\n # Try to avoid this by keeping the temp files open until the\n # the program exits.\n _tempfiles.append (src_file)\n pass\n\n elif fname:\n import os.path as osp\n fname = osp.expanduser(osp.expandvars(fname))\n pass\n \n assert os.access(fname, os.R_OK), \"could not read [%s]\"%(fname,)\n orig_root_lvl = ROOT.gErrorIgnoreLevel\n ROOT.gErrorIgnoreLevel = ROOT.kWarning\n try:\n with root_shutup():\n sc = ROOT.gSystem.CompileMacro(fname, compile_options)\n if sc == ROOT.kFALSE:\n raise RuntimeError(\n 'problem compiling ROOT macro (rc=%s)'%(sc,)\n )\n finally:\n ROOT.gErrorIgnoreLevel = orig_root_lvl\n return\n \n@memoize\ndef _pythonize_tfile():\n import cppyy\n root = import_root()\n import PyUtils.Helpers as H\n with H.ShutUp(filters=[\n re.compile(\n 'TClass::TClass:0: RuntimeWarning: no dictionary for.*'),\n re.compile(\n 'Warning in <TEnvRec::ChangeValue>: duplicate entry.*'\n ),\n ]):\n cppyy.loadDict(\"RootUtilsPyROOTDict\")\n rootutils = getattr(root, \"RootUtils\")\n pybytes = getattr(rootutils, \"PyBytes\") # noqa: F841\n #MN: lines below fail in ROOT6 if PCM from RootUtils is not found\n read_root_file = getattr(rootutils, \"_pythonize_read_root_file\")\n tell_root_file = getattr(rootutils, \"_pythonize_tell_root_file\")\n pass\n def read(self, size=-1):\n \"\"\"read([size]) -> read at most size bytes, returned as a string.\n\n If the size argument is negative or omitted, read until EOF is reached.\n Notice that when in non-blocking mode, less data than what was requested\n may be returned, even if no size parameter was given.\n\n FIXME: probably doesn't follow python file-like conventions...\n \"\"\"\n SZ = 4096\n\n # FIXME: Once we drop py2, we can simplify this by using a bytes\n # object directly instead of PyBytes.\n if size>=0:\n #size = _adjust_sz(size)\n #print (\"-->0\",self.tell(),size)\n c_buf = read_root_file(self, size)\n if c_buf and c_buf.sz:\n v = c_buf.buf\n if six.PY3:\n return bytes([ord(v[i]) for i in range(v.size())])\n return ''.join([v[i] for i in range(v.size())])\n return ''\n else:\n size = SZ\n out = []\n while True:\n #size = _adjust_sz(size)\n c_buf = read_root_file(self, size)\n if c_buf and c_buf.sz:\n v = c_buf.buf\n if six.PY3:\n chunk = bytes([ord(v[i]) for i in range(v.size())])\n else:\n chunk = ''.join([v[i] for i in range(v.size())])\n out.append(chunk)\n else:\n break\n if six.PY3:\n return b''.join(out)\n return ''.join(out)\n \n root.TFile.read = read\n del read\n \n root.TFile.seek = root.TFile.Seek\n root.TFile.tell = lambda self: tell_root_file(self)\n ## import os\n ## def tell(self):\n ## fd = os.dup(self.GetFd())\n ## return os.fdopen(fd).tell()\n ## root.TFile.tell = tell\n ## del tell\n return \n\n\ndef _getLeaf (l):\n tname = l.GetTypeName()\n ndat = l.GetNdata()\n if tname in ['UInt_t', 'Int_t', 'ULong64_t', 'Long64_t']:\n return [l.GetValueLong64(i) for i in range(ndat)]\n if tname in ['Float_t', 'Double_t']:\n return [l.GetValue(i) for i in range(ndat)]\n if tname in ['Char_t']:\n try:\n return l.GetValueString() # TLeafC for variable size string\n except Exception:\n return [l.GetValue(i) for i in range(ndat)] # TLeafB for 8-bit integers\n return None\n\nclass RootFileDumper(object):\n \"\"\"\n A helper class to dump in more or less human readable form the content of\n any TTree.\n \"\"\"\n \n def __init__(self, fname, tree_name=\"CollectionTree\"):\n object.__init__(self)\n\n 
ROOT = import_root()\n\n # remember if an error or problem occurred during the dump\n self.allgood = True\n \n self.root_file = ROOT.TFile.Open(fname)\n if (self.root_file is None or\n not isinstance(self.root_file, ROOT.TFile) or\n not self.root_file.IsOpen()):\n raise IOError('could not open [%s]'% fname)\n\n self.tree = self.root_file.Get(tree_name)\n if self.tree is None or not isinstance(self.tree, ROOT.TTree):\n raise AttributeError('no tree [%s] in file [%s]' % (tree_name, fname))\n\n if 0:\n self._trees = []\n keys = [k.GetName() for k in self.root_file.GetListOfKeys()]\n for k in keys:\n o = self.root_file.Get(k)\n if isinstance(o, ROOT.TTree):\n self._trees.append(k)\n pass\n\n return\n\n def dump(self, tree_name, itr_entries, leaves=None):\n\n ROOT = import_root()\n import AthenaPython.PyAthena as PyAthena\n _pythonize = PyAthena.RootUtils.PyROOTInspector.pyroot_inspect2\n\n self.tree = self.root_file.Get(tree_name)\n if self.tree is None or not isinstance(self.tree, ROOT.TTree):\n raise AttributeError('no tree [%s] in file [%s]' % (tree_name, self.root_file.GetName()))\n\n tree = self.tree\n nentries = tree.GetEntries()\n branches = sorted([b.GetName().rstrip('\\0') for b in tree.GetListOfBranches()])\n if leaves is None: leaves = branches\n else: leaves = [str(b).rstrip('\\0') for b in leaves]\n \n # handle itr_entries\n if isinstance(itr_entries, str):\n if ':' in itr_entries:\n def toint(s):\n if s == '':\n return None\n try:\n return int(s)\n except ValueError:\n return s\n from itertools import islice\n itr_entries = islice(range(nentries),\n *map(toint, itr_entries.split(':')))\n elif ('range' in itr_entries or\n ',' in itr_entries):\n itr_entries = eval(itr_entries)\n else:\n try:\n _n = int(itr_entries)\n itr_entries = range(_n)\n except ValueError:\n print (\"** err ** invalid 'itr_entries' argument. will iterate over all entries.\")\n itr_entries = range(nentries)\n elif isinstance(itr_entries, list):\n itr_entries = itr_entries\n else:\n itr_entries = range(itr_entries)\n \n for ientry in itr_entries:\n hdr = \":: entry [%05i]...\" % (ientry,)\n #print (hdr)\n #print (hdr, file=self.fout)\n err = tree.LoadTree(ientry)\n if err < 0:\n print (\"**err** loading tree for entry\",ientry)\n self.allgood = False\n break\n\n nbytes = tree.GetEntry(ientry)\n if nbytes <= 0:\n print (\"**err** reading entry [%s] of tree [%s]\" % (ientry, tree_name))\n hdr = \":: entry [%05i]... 
[ERR]\" % (ientry,)\n print (hdr)\n self.allgood = False\n continue\n\n for br_name in leaves:\n hdr = \":: branch [%s]...\" % (br_name,)\n #print (hdr)\n #tree.GetBranch(br_name).GetEntry(ientry)\n py_name = [br_name]\n\n br = tree.GetBranch (br_name)\n if br.GetClassName() != '':\n val = getattr(tree, br_name)\n else:\n vals = [_getLeaf (l) for l in br.GetListOfLeaves()]\n if len(vals) == 0:\n val = None\n elif len(vals) == 1:\n val = vals\n else:\n val = tuple(vals)\n if not (val is None):\n #print (\"-->\",val,br_name)\n try:\n vals = _pythonize(val, py_name, True)\n except Exception as err:\n print (\"**err** for branch [%s] val=%s (type=%s)\" % (\n br_name, val, type(val),\n ))\n self.allgood = False\n print (err)\n for o in vals:\n n = list(map(str, o[0]))\n v = o[1]\n yield tree_name, ientry, n, v\n\n pass # loop over branch names\n pass # loop over entries\n pass # class RootFileDumper\n\n### test support --------------------------------------------------------------\ndef _test_main():\n root = import_root() # noqa: F841\n def no_raise(msg, fct, *args, **kwds):\n caught = False\n err = None\n try:\n fct(*args, **kwds)\n except Exception as xerr:\n err = xerr\n caught = True\n assert not caught, \"%s:\\n%s\\nERROR\" % (msg, err,)\n\n no_raise(\"problem pythonizing TFile\", fct=_pythonize_tfile)\n no_raise(\"problem compiling dummy one-liner\",\n root_compile, \"void foo1() { return ; }\")\n no_raise(\"problem compiling dummy one-liner w/ kwds\",\n fct=root_compile, src=\"void foo1a() { return ; }\")\n import tempfile\n # PvG workaround for ROOT-7059\n dummy = tempfile.NamedTemporaryFile(prefix=\"foo_\",suffix=\".cxx\") # noqa: F841\n with tempfile.NamedTemporaryFile(prefix=\"foo_\",suffix=\".cxx\") as tmp:\n tmp.write (b\"void foo2() { return ; }\\n\")\n tmp.flush()\n no_raise(\"problem compiling a file\",\n fct=root_compile, fname=tmp.name)\n\n print (\"OK\")\n return True\n\nif __name__ == \"__main__\":\n _test_main()\n \n", "sub_path": "Tools/PyUtils/python/RootUtils.py", "file_name": "RootUtils.py", "file_ext": "py", "file_size_in_byte": 13478, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ROOT.gROOT.SetBatch", "line_number": 36, "usage_type": "call"}, {"api_name": "ROOT.gROOT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "ROOT.PyConfig", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 40, "usage_type": "attribute"}, {"api_name": "ROOT.gSystem.GetMakeSharedLib", "line_number": 41, "usage_type": "call"}, {"api_name": "ROOT.gSystem", "line_number": 41, "usage_type": "attribute"}, {"api_name": "ROOT.gSystem.SetMakeSharedLib", "line_number": 44, "usage_type": "call"}, {"api_name": "ROOT.gSystem", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 76, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 85, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "name"}, {"api_name": "os.path.expandvars", "line_number": 106, "usage_type": "call"}, {"api_name": "os.access", "line_number": 109, "usage_type": "call"}, {"api_name": "os.R_OK", "line_number": 109, 
"usage_type": "attribute"}, {"api_name": "ROOT.gErrorIgnoreLevel", "line_number": 110, "usage_type": "attribute"}, {"api_name": "ROOT.gErrorIgnoreLevel", "line_number": 111, "usage_type": "attribute"}, {"api_name": "ROOT.kWarning", "line_number": 111, "usage_type": "attribute"}, {"api_name": "Helpers.ShutUp", "line_number": 113, "usage_type": "call"}, {"api_name": "ROOT.gSystem.CompileMacro", "line_number": 114, "usage_type": "call"}, {"api_name": "ROOT.gSystem", "line_number": 114, "usage_type": "attribute"}, {"api_name": "ROOT.kFALSE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "ROOT.gErrorIgnoreLevel", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PyUtils.Helpers.ShutUp", "line_number": 128, "usage_type": "call"}, {"api_name": "PyUtils.Helpers", "line_number": 128, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 129, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 131, "usage_type": "call"}, {"api_name": "cppyy.loadDict", "line_number": 135, "usage_type": "call"}, {"api_name": "six.PY3", "line_number": 161, "usage_type": "attribute"}, {"api_name": "six.PY3", "line_number": 173, "usage_type": "attribute"}, {"api_name": "six.PY3", "line_number": 180, "usage_type": "attribute"}, {"api_name": "Decorators.memoize", "line_number": 123, "usage_type": "name"}, {"api_name": "ROOT.TFile.Open", "line_number": 226, "usage_type": "call"}, {"api_name": "ROOT.TFile", "line_number": 226, "usage_type": "attribute"}, {"api_name": "ROOT.TFile", "line_number": 228, "usage_type": "attribute"}, {"api_name": "ROOT.TTree", "line_number": 233, "usage_type": "attribute"}, {"api_name": "ROOT.TTree", "line_number": 241, "usage_type": "attribute"}, {"api_name": "AthenaPython.PyAthena.RootUtils", "line_number": 251, "usage_type": "attribute"}, {"api_name": "AthenaPython.PyAthena", "line_number": 251, "usage_type": "name"}, {"api_name": "ROOT.TTree", "line_number": 254, "usage_type": "attribute"}, {"api_name": "itertools.islice", "line_number": 274, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 365, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 366, "usage_type": "call"}]} +{"seq_id": "462157700", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 27 23:07:36 2019\n\n@author: Admin\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom pynput.mouse import Controller,Button\nclk=\"Yes\"\nmouse=Controller()\ndef No(x):\n pass\nmodel=tf.keras.models.load_model(\"Model.model\")\nkernelOpen=np.ones((5,5))\nkernelClose=np.ones((20,20))\ncv2.namedWindow(\"Window\")\ncv2.createTrackbar(\"H-H\",\"Window\",0,255,No)\ncv2.createTrackbar(\"H-S\",\"Window\",0,255,No)\ncv2.createTrackbar(\"H-V\",\"Window\",0,255,No)\ncv2.createTrackbar(\"L-H\",\"Window\",0,255,No)\ncv2.createTrackbar(\"L-S\",\"Window\",0,255,No)\ncv2.createTrackbar(\"L-V\",\"Window\",0,255,No)\nliste=[]\ncap=cv2.VideoCapture(0)\nret,frame=cap.read()\nstart=False\ngoal=\"None\"\ncate=[\"One\",\"Two\",\"Three\",\"None\"]\nwhile True:\n if cv2.waitKey(10)==115:\n if start==False:\n start=True\n elif start==True:\n start=False\n ret,frame=cap.read()\n hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n l_h=cv2.getTrackbarPos(\"L-H\",\"Window\")\n l_s=cv2.getTrackbarPos(\"L-S\",\"Window\")\n l_v=cv2.getTrackbarPos(\"L-V\",\"Window\")\n h_h=cv2.getTrackbarPos(\"H-H\",\"Window\")\n h_v=cv2.getTrackbarPos(\"H-S\",\"Window\")\n h_s=cv2.getTrackbarPos(\"H-V\",\"Window\")\n lower=np.array([l_h,l_s,l_v])\n 
upper=np.array([h_h,h_s,h_v])\n frame=cv2.GaussianBlur(frame,(15,15),0)\n mask=cv2.inRange(hsv,lower,upper)\n cv2.imshow(\"Window\",mask)\n mask2=mask\n maskOpen=cv2.morphologyEx(mask2,cv2.MORPH_OPEN,kernelOpen)\n maskClose=cv2.morphologyEx(mask2,cv2.MORPH_CLOSE,kernelClose)\n maskFinal=maskClose\n conts,h=cv2.findContours(maskFinal.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n if len(conts)<5000 and len(conts)>=1:\n areas=[cv2.contourArea(c) for c in conts]\n max_ind=np.argmax(areas)\n cnt=conts[max_ind]\n if cv2.contourArea(cnt)>1000:\n\n x1,y1,h1,w1=cv2.boundingRect(cnt)\n if start==True:\n x2,y2,h2,w2=cv2.boundingRect(cnt)\n centerX,centerY=(x2+h2/2,y2+w2/2)\n distX,distY=(320-centerX,240-centerY)\n dx=distX\n dy=distY\n dx1=dx\n dy1=dy\n if abs(distX)>20 and abs(distY)>20:\n mouse.move(dx/10,-dy/10)\n clk=\"None\"\n else:\n clk=\"Yes\"\n\n\n mask=cv2.resize(mask,(50,50))\n \n mask=mask/255\n \n \n \n \n \n mask=mask.reshape(-1,50,50,1)\n ans=model.predict(mask)\n state=cate[np.argmax(ans)]\n if clk!=\"None\" or goal==\"None\":\n if state==\"One\":\n mouse.click(Button.left,2)\n goal=\"Free\"\n else:\n goal=\"None\"\n\n\n \n\n \n print(start)\n if cv2.waitKey(10)==27:\n break\ncv2.destroyAllWindows()\ncap.release()\n ", "sub_path": "clicker.py", "file_name": "clicker.py", "file_ext": "py", "file_size_in_byte": 2982, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pynput.mouse.Controller", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.getTrackbarPos", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 52, 
"usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "pynput.mouse.Button.left", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button", "line_number": 91, "usage_type": "name"}, {"api_name": "cv2.waitKey", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "86062287", "text": "import serial, time\n\nfrom model.StaticClasses.DataPacketFactory import DataPacketFactory\nfrom model.StaticClasses.GlobalConstants import GlobalConstants\nfrom model.StaticClasses.StrManipulator import StrManipulator\nfrom model.StaticClasses.Calculator import Calculator\nfrom queue import Queue\nfrom model.Data.Data import Data\nfrom model.Communication.CommunicationManager import CommunicationManager\nfrom model.StaticClasses.DataStructFunctions import DataStructFunctions\nfrom threading import Thread\n\n\nclass TestSerialCom:\n COM_PORT_NAME = 'COM1'\n BAUDRATE = 9600\n BYTESIZE = 8\n PACKET_LEN = 253\n\n def __init__(self):\n self.data_cnt = 0\n self.curr_heartbeat_period = 1000\n self.cur_ecc_period = 1000\n self.send_data_queue = Queue()\n self.read_data_queue = Queue()\n self.new_data_to_read = True\n self.ecc_index = 0\n\n def init_com(self):\n \"\"\"\n opens a serial communication port\n \"\"\"\n self.serial_port = serial.Serial(self.COM_PORT_NAME, self.BAUDRATE,\n self.BYTESIZE, serial.PARITY_NONE, serial.STOPBITS_ONE, timeout=0)\n\n def send_data_packet(self, data):\n \"\"\"\n send a generated data packet to the connected\n serial communication port, increments data_cnt\n \"\"\"\n DataPacketFactory.adjust_data_cnt(data, self.data_cnt)\n self.serial_port.write(data)\n self.data_cnt += 1\n\n def add_ecc_check_to_queue(self, ecc_addresses: list):\n err_cnt = len(ecc_addresses)\n overflow = len(ecc_addresses) > GlobalConstants.MAX_ADDRESSES_ECC\n split_addresses = DataStructFunctions.chunk(ecc_addresses, GlobalConstants.MAX_ADDRESSES_ECC)\n\n for addr in split_addresses:\n addr = ''.join(addr)\n ecc_check_frame = DataPacketFactory.get_packet('ECC_CHECKED',\n params={\n 'index': self.ecc_index,\n 'overflow': overflow,\n 'err_cnt': err_cnt,\n 'ecc_addresses': addr\n })\n self.send_data_queue.put(ecc_check_frame)\n\n self.ecc_index += 1\n\n def send_reset_request(self, reset_reason=''):\n reset_reason_code = DataStructFunctions.get_key(GlobalConstants.RESET_PURPOSES, reset_reason)\n params = {'reset_reason': ''}\n if reset_reason_code:\n params = {'reset_reason': reset_reason_code}\n reset_packet = DataPacketFactory.get_packet('RESET_RESPONSE', params)\n 
self.send_data_queue.put(reset_packet)\n\n def generate_ecc_check(self):\n \"\"\"\"\n generates ecc_check with memory addresses provided\n \"\"\"\n self.send_reset_request('SYST')\n self.send_reset_request('EXT')\n while True:\n ecc_addresses = ['000000', '111111', '222222', '333333', '444444', '555555', '666666', '777777', '888888',\n '999999']\n self.add_ecc_check_to_queue(ecc_addresses)\n time.sleep(self.cur_ecc_period / 1000)\n\n def read_data(self):\n \"\"\"\n reads data coming into the open communication port\n and adds them to te thread-safe queue for data analysis\n \"\"\"\n while True:\n if not self.serial_port: continue\n new_data = self.serial_port.read()\n new_data = new_data.hex()\n self.read_data_queue.put(new_data)\n\n def analyse_data(self):\n data = ''\n expected_data_len = GlobalConstants.HEX_DIGITS_PER_BYTE * GlobalConstants.MAX_PACKET_LEN + 1\n data_len_hex = ''\n while True:\n if not self.serial_port: continue\n new_data = self.read_data_queue.get()\n if new_data:\n if self.new_data_to_read:\n if new_data == GlobalConstants.START_CODE:\n data += new_data\n self.new_data_to_read = False\n else:\n if len(data) < expected_data_len:\n data += new_data\n # extract\n if GlobalConstants.PACKET_LEN_START_INDEX <= len(data) <= GlobalConstants.PACKET_LEN_END_INDEX:\n data_len_hex += new_data\n if len(data) == GlobalConstants.DATA_COUNTER_END_INDEX:\n expected_data_len = Calculator.get_int(data_len_hex) * GlobalConstants.HEX_DIGITS_PER_BYTE\n if len(data) == expected_data_len:\n data_packet = Data(data)\n self.analyse_data_purpose(data_packet)\n # clear data\n self.new_data_to_read = True\n data = ''\n expected_data_len = GlobalConstants.HEX_DIGITS_PER_BYTE * GlobalConstants.MAX_PACKET_LEN + 1\n data_len_hex = ''\n\n def analyse_data_purpose(self, data_packet: Data):\n data_packet.extract_data_payload()\n data_payload = data_packet.data_payload_value\n if data_packet.purpose == 'HEARTBEAT_REQUEST':\n heartbeat = self.analyse_heartbeat(data_payload)\n self.send_data_stream(heartbeat)\n\n def analyse_heartbeat(self, data_payload: str):\n period = StrManipulator.substring(data_payload, GlobalConstants.HEARTBEAT_PERIOD_START,\n GlobalConstants.HEARTBEAT_PERIOD_END)\n period = Calculator.get_int(period)\n self.curr_heartbeat_period = DataStructFunctions.get_key(GlobalConstants.HEARTBEAT_PERIODS, period)\n id = StrManipulator.substring(data_payload, GlobalConstants.HEARTBEAT_ID_START,\n GlobalConstants.HEARTBEAT_ID_END)\n id = Calculator.get_int(id)\n heartbeat = DataPacketFactory.get_packet('HEARTBEAT_RESPONSE', params={'heartbeat_id': id})\n return heartbeat\n\n def send_data_stream(self, heartbeat: bytearray):\n data_sent = 0\n max_frames = CommunicationManager.get_max_frames_num(self.curr_heartbeat_period, TestSerialCom.BAUDRATE)\n while (not self.send_data_queue.empty()) and data_sent < max_frames:\n data = self.send_data_queue.get()\n self.send_data_packet(data)\n data_sent += 1\n\n self.send_data_packet(heartbeat)\n\n\ntest = TestSerialCom()\ntest.init_com()\n\necc_check_thread = Thread(target=test.generate_ecc_check)\necc_check_thread.start()\n\nread_data_thread = Thread(target=test.read_data)\nread_data_thread.start()\n\nanalyse_data_thread = Thread(target=test.analyse_data)\nanalyse_data_thread.start()", "sub_path": "test/TestSerialCom.py", "file_name": "TestSerialCom.py", "file_ext": "py", "file_size_in_byte": 6924, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "queue.Queue", "line_number": 24, 
"usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 25, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 33, "usage_type": "call"}, {"api_name": "serial.PARITY_NONE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory.adjust_data_cnt", "line_number": 41, "usage_type": "call"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory", "line_number": 41, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.MAX_ADDRESSES_ECC", "line_number": 47, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 47, "usage_type": "name"}, {"api_name": "model.StaticClasses.DataStructFunctions.DataStructFunctions.chunk", "line_number": 48, "usage_type": "call"}, {"api_name": "model.StaticClasses.DataStructFunctions.DataStructFunctions", "line_number": 48, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.MAX_ADDRESSES_ECC", "line_number": 48, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 48, "usage_type": "name"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory.get_packet", "line_number": 52, "usage_type": "call"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory", "line_number": 52, "usage_type": "name"}, {"api_name": "model.StaticClasses.DataStructFunctions.DataStructFunctions.get_key", "line_number": 64, "usage_type": "call"}, {"api_name": "model.StaticClasses.DataStructFunctions.DataStructFunctions", "line_number": 64, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.RESET_PURPOSES", "line_number": 64, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 64, "usage_type": "name"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory.get_packet", "line_number": 68, "usage_type": "call"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory", "line_number": 68, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEX_DIGITS_PER_BYTE", "line_number": 96, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 96, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.MAX_PACKET_LEN", "line_number": 96, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.START_CODE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 103, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.PACKET_LEN_START_INDEX", "line_number": 110, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 110, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.PACKET_LEN_END_INDEX", "line_number": 110, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.DATA_COUNTER_END_INDEX", "line_number": 112, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 
112, "usage_type": "name"}, {"api_name": "model.StaticClasses.Calculator.Calculator.get_int", "line_number": 113, "usage_type": "call"}, {"api_name": "model.StaticClasses.Calculator.Calculator", "line_number": 113, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEX_DIGITS_PER_BYTE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 113, "usage_type": "name"}, {"api_name": "model.Data.Data.Data", "line_number": 115, "usage_type": "call"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEX_DIGITS_PER_BYTE", "line_number": 120, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 120, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.MAX_PACKET_LEN", "line_number": 120, "usage_type": "attribute"}, {"api_name": "model.Data.Data.Data", "line_number": 123, "usage_type": "name"}, {"api_name": "model.StaticClasses.StrManipulator.StrManipulator.substring", "line_number": 131, "usage_type": "call"}, {"api_name": "model.StaticClasses.StrManipulator.StrManipulator", "line_number": 131, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEARTBEAT_PERIOD_START", "line_number": 131, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 131, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEARTBEAT_PERIOD_END", "line_number": 132, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 132, "usage_type": "name"}, {"api_name": "model.StaticClasses.Calculator.Calculator.get_int", "line_number": 133, "usage_type": "call"}, {"api_name": "model.StaticClasses.Calculator.Calculator", "line_number": 133, "usage_type": "name"}, {"api_name": "model.StaticClasses.DataStructFunctions.DataStructFunctions.get_key", "line_number": 134, "usage_type": "call"}, {"api_name": "model.StaticClasses.DataStructFunctions.DataStructFunctions", "line_number": 134, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEARTBEAT_PERIODS", "line_number": 134, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 134, "usage_type": "name"}, {"api_name": "model.StaticClasses.StrManipulator.StrManipulator.substring", "line_number": 135, "usage_type": "call"}, {"api_name": "model.StaticClasses.StrManipulator.StrManipulator", "line_number": 135, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEARTBEAT_ID_START", "line_number": 135, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 135, "usage_type": "name"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants.HEARTBEAT_ID_END", "line_number": 136, "usage_type": "attribute"}, {"api_name": "model.StaticClasses.GlobalConstants.GlobalConstants", "line_number": 136, "usage_type": "name"}, {"api_name": "model.StaticClasses.Calculator.Calculator.get_int", "line_number": 137, "usage_type": "call"}, {"api_name": "model.StaticClasses.Calculator.Calculator", "line_number": 137, "usage_type": "name"}, {"api_name": "model.StaticClasses.DataPacketFactory.DataPacketFactory.get_packet", "line_number": 138, "usage_type": "call"}, {"api_name": 
"model.StaticClasses.DataPacketFactory.DataPacketFactory", "line_number": 138, "usage_type": "name"}, {"api_name": "model.Communication.CommunicationManager.CommunicationManager.get_max_frames_num", "line_number": 143, "usage_type": "call"}, {"api_name": "model.Communication.CommunicationManager.CommunicationManager", "line_number": 143, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 155, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 158, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "339523328", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport requests\n\n# Create your views here.\n\n@csrf_exempt\ndef process_message(request):\n if request.method == 'POST':\n sender = request.POST.get('sender')\n print('Sender = ', sender)\n recipient = request.POST.get('recipient')\n print('Recipient = ', recipient)\n subject = request.POST.get('subject', '')\n print('Subject = ', subject)\n\n body_plain = request.POST.get('body-plain', '')\n print('body_plain = ', body_plain)\n body_without_quotes = request.POST.get('stripped-text', '')\n print('body_without_quotes = ', body_without_quotes)\n # note: other MIME headers are also posted here...\n\n # attachments:\n for key in request.FILES:\n file = request.FILES[key]\n print(file)\n # do something with the file\n send_simple_message(sender, \"Re: \" + subject)\n\n # Returned text is ignored but HTTP status code matters:\n # Mailgun wants to see 2xx, otherwise it will make another attempt in 5 minutes\n return HttpResponse('OK')\n\ndef send_simple_message(recipient_email_id, subject):\n return requests.post(\n \"https://api.mailgun.net/v3/api.billwise.co/messages\",\n auth=(\"api\", \"key-ae6af4f4c6a04a3f48fa506094189246\"),\n # data={\"from\": \"Billwise postmaster@sandboxaeedb52bbc344d3db562ce0ddc5fb584.mailgun.org\",\n data={\"from\": \"Billwise super-smart-ai@api.billwise.co\", \n \"to\": [recipient_email_id],\n \"subject\": subject,\n \"text\": \"Hey, thanks for sending us your bill. We will parse it and add it to our knowledge base. 
Our bots are already at work using the new information to make your bill-paying existence better and more useful.\"})\n\n@csrf_exempt\ndef process_hook_from_context(request):\n\treturn HttpResponse('OK')\n\ndef process_receipt_message(request):\n\tprint(\"Receipt callback called\")\n\treturn HttpResponse('OK')", "sub_path": "email_parser/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 8, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 45, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "133777054", "text": "\"\"\"train cifar10 with pytorch.\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport random\nimport shutil\nimport argparse\nimport logging\nfrom time import *\n\nimport yaml\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as f\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\nimport torchvision\nimport torchvision.datasets\nimport torchvision.transforms as transforms\n\n\nimport datasets\nfrom models import *\nfrom trainer import *\n# import trainer\nfrom utils import *\n\ndef main(argv):\n\n parser = argparse.ArgumentParser(description=\"PyTorch CIFAR10 Training\", prog=\"main.py\")\n parser.add_argument(\"cfg\", help=\"Available models: \")\n parser.add_argument(\"--local_rank\",default=-1,type=int,help=\"the rank of this process\")\n parser.add_argument(\"--gpu\", default=\"0\", help=\"gpu ids, seperate by comma\")\n parser.add_argument(\"--save\", required=True,\n help=\"store logs/checkpoints under this directory\")\n parser.add_argument(\"--resume\", \"-r\", help=\"resume from checkpoint\")\n parser.add_argument(\"--pretrain\", action=\"store_true\",\n help=\"used with `--resume`, regard as a pretrain model, do not recover epoch/best_acc\")\n parser.add_argument(\"--no-cuda\", action=\"store_true\", default=False, help=\"do not use gpu\")\n parser.add_argument(\"--seed\", default=2020, help=\"random seed\", type=int)\n parser.add_argument(\"--save-every\", default=50, type=int, help=\"save every N epoch\")\n parser.add_argument(\"--distributed\", action=\"store_true\",help=\"whether to use distributed training\")\n parser.add_argument(\"--dataset-path\", default=None, help=\"dataset path\")\n args = parser.parse_args(argv)\n\n savepath = args.save\n\n if args.local_rank > -1:\n torch.cuda.set_device(args.local_rank)\n \n if args.distributed: \n # device = torch.cuda.current_device()\n torch.distributed.init_process_group(backend=\"nccl\")\n else:\n gpus = [int(d) for d in args.gpu.split(\",\")]\n torch.cuda.set_device(gpus[0])\n\n\n if not os.path.isdir(savepath):\n if sys.version_info.major == 2:\n os.makedirs(savepath)\n else:\n os.makedirs(savepath, exist_ok=True)\n # Setup logfile\n\n # log_format = \"%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s \\t: %(message)s\"\n 
log_format = \"%(message)s\"\n logging.basicConfig(\n level=logging.INFO,\n format=log_format,\n filemode=\"w\")\n if args.local_rank == 0 or args.local_rank == -1:\n file_handler = logging.FileHandler(os.path.join(savepath, \"train.log\"))\n else:\n # file_handler = logging.FileHandler(\"/dev/null\")\n file_handler = logging.FileHandler(os.path.join(savepath, \"train_{}.log\".format(args.local_rank)))\n file_handler.setFormatter(logging.Formatter(log_format))\n logging.getLogger().addHandler(file_handler)\n logging.info(\"CMD: %s\", \" \".join(sys.argv))\n\n\n if args.seed is not None:\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed)\n\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.enabled = True\n\n import numpy as np\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n logging.info(\"Setting Random Seed {}\".format(args.seed))\n\n\n # Load and backup configuration file\n shutil.copyfile(args.cfg, os.path.join(savepath, \"config.yaml\"))\n with open(args.cfg) as cfg_f:\n cfg = yaml.load(cfg_f)\n \n device = \"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\"\n \n if device == \"cuda\":\n logging.info(\"Using GPU! Available gpu count: {}\".format(torch.cuda.device_count()))\n else:\n logging.info(\"\\033[1;3mWARNING: Using CPU!\\033[0m\")\n\n\n # For Fix cfg\n if cfg[\"trainer\"].get(\"fix\",None) is not None:\n\n # Regenerate the default generation cfg\n import nics_fix_pt as nfp\n import nics_fix_pt.nn_fix as nnf\n import numpy as np\n\n def _generate_default_fix_cfg(names, scale=0, bitwidth=8, method=0):\n return {\n n: {\n \"method\": torch.autograd.Variable(\n torch.IntTensor(np.array([method])), requires_grad=False\n ),\n \"scale\": torch.autograd.Variable(\n torch.IntTensor(np.array([scale])), requires_grad=False\n ),\n \"bitwidth\": torch.autograd.Variable(\n torch.IntTensor(np.array([bitwidth])), requires_grad=False\n ),\n # \"range_method\": nfp.RangeMethod.RANGE_MAX_TENPERCENT,\n # \"range_method\": nfp.RangeMethod.RANGE_SWEEP\n \"range_method\": nfp.RangeMethod.RANGE_MAX,\n \"stochastic\": cfg[\"trainer\"][\"fix\"][\"stochastic\"],\n \"float_scale\": cfg[\"trainer\"][\"fix\"][\"float_scale\"],\n \"zero_point\": cfg[\"trainer\"][\"fix\"][\"zero_point\"],\n \"group\": cfg[\"trainer\"][\"fix\"][\"group\"],\n }\n for n in names\n }\n\n # Fix-Net are defined in the fix_utlis.py, it has dependency of _generate_default_fix_cfg\n # so make sure import it after defining the function based-on the config\n import fix_utils\n from fix_utils import set_fix_mode\n\n\n# ---------------------- Dataset -------------------------\n\n logging.info(\"==> Preparing data..\")\n if cfg[\"trainer_type\"] == \"plain\":\n if cfg[\"trainer\"][\"dataset\"] == \"cifar\":\n trainloader,validloader, ori_trainloader, testloader, _ = datasets.cifar10(cfg[\"trainer\"].get(\"train_batch_size\",None), cfg[\"trainer\"].get(\"test_batch_size\",None), cfg[\"trainer\"].get(\"train_transform\", None), cfg[\"trainer\"].get(\"test_transform\", None), train_val_split_ratio = None, distributed=args.distributed, root=args.dataset_path)\n elif cfg[\"trainer\"][\"dataset\"] == \"imagenet\":\n trainloader,validloader, ori_trainloader, testloader, _ = datasets.imagenet(cfg[\"trainer\"][\"train_batch_size\"], cfg[\"trainer\"][\"test_batch_size\"], cfg[\"trainer\"].get(\"train_transform\", None), cfg[\"trainer\"].get(\"test_transform\", None), train_val_split_ratio = None, 
distributed=args.distributed, path=args.dataset_path)\n elif cfg[\"trainer_type\"] == \"semi\":\n if cfg[\"trainer\"][\"dataset\"] == \"cifar\":\n trainloader, testloader, _ = datasets.semi_cifar10(numlabel=cfg[\"trainer\"][\"numlabel\"], \n label_bs=cfg[\"trainer\"].get(\"label_batch_size\",None),\n train_bs=cfg[\"trainer\"].get(\"train_batch_size\",None),\n test_bs=cfg[\"trainer\"].get(\"test_batch_size\",None),\n train_transform=None,\n test_transform=None,\n root=args.dataset_path,\n label_dir=None)\n elif cfg[\"trainer\"][\"dataset\"] == \"svhn\":\n trainloader_l,trainloader_u,valloader,testloader = datasets.semi_svhn(\n numlabel=cfg[\"trainer\"][\"numlabel\"],\n label_bs=cfg[\"trainer\"].get(\"label_batch_size\",None),\n train_bs=cfg[\"trainer\"].get(\"train_batch_size\",None),\n test_bs=cfg[\"trainer\"].get(\"test_batch_size\",None),\n train_transform=None,\n test_transform=None,\n root=args.dataset_path,\n cfg=cfg\n )\n elif cfg[\"trainer_type\"] == \"da\":\n d_da = {\n # Digits\n \"s\": \"svhn\",\n \"m\": \"mnist\",\n \"syn\": \"synth\",\n \"synth-sl\": \"synth-small\",\n \"u\": \"usps\",\n # Office 31\n \"d\": \"dslr\",\n \"w\": \"webcam\",\n \"p\": \"pascal\",\n # Caltech\n \"c\": \"Caltech\",\n # imgnet\n \"i\": \"imagenet\"\n }\n\n cfg[\"trainer\"][\"source\"] = d_da[cfg[\"trainer\"][\"source\"]]\n cfg[\"trainer\"][\"target\"] = d_da[cfg[\"trainer\"][\"target\"]]\n\n if cfg[\"trainer\"][\"dataset\"] == \"digits\":\n trainloader, testloader = datasets.da_digits(\n domains = [cfg[\"trainer\"][\"source\"],cfg[\"trainer\"][\"target\"]],\n train_bs = cfg[\"trainer\"].get(\"train_batch_size\",None),\n test_bs = cfg[\"trainer\"].get(\"test_batch_size\",None),\n train_transform=None,\n test_transform=None,\n root=args.dataset_path,\n cfg=cfg\n )\n ## Build model\n\n logging.info(\"==> Building model..\")\n\n ## ------- Net --------------\n ## -----(Fix Cfg Here)-------\n net_type = cfg[\"trainer\"][\"model\"]\n if net_type == \"vgg\":\n net = vgg.VGG(\"VGG16\")\n elif net_type == \"convnet\":\n if cfg[\"trainer\"].get(\"fix\",None) is not None:\n net = fix_utils.MyNet_fix(fix=True, fix_bn=cfg[\"trainer\"][\"fix\"][\"fix_bn\"], bitwidths=list(cfg[\"trainer\"][\"fix\"][\"bitwidth\"].values()),default_f=_generate_default_fix_cfg)\n else:\n net = convnet.MyNet()\n\n\n ## ---- Setting the Fix-Mode -------\n if cfg[\"trainer\"].get(\"fix\",None) is not None:\n set_fix_mode(net,\"train\",cfg[\"trainer\"])\n\n # Copy a piece of net for semi training & DA\n if cfg[\"trainer_type\"]==\"semi\" or cfg[\"trainer_type\"]==\"da\":\n if cfg[\"trainer\"].get(\"fix\",None) is not None:\n net_ = type(net)(fix=True, fix_bn=cfg[\"trainer\"][\"fix\"][\"fix_bn\"], bitwidths=list(cfg[\"trainer\"][\"fix\"][\"bitwidth\"].values()),default_f=_generate_default_fix_cfg)\n else:\n net_ = type(net)()\n net_.load_state_dict(net.state_dict())\n net_ = net_.to(device)\n if device == \"cuda\":\n cudnn.benchmark = True\n if args.distributed:\n p_net_ = torch.nn.parallel.DistributedDataParallel(net_, [args.local_rank], output_device=args.local_rank, find_unused_parameters=True)\n else: \n if len(gpus) > 1:\n p_net_ = torch.nn.DataParallel(net_, gpus)\n else:\n p_net_ = net_\n\n net = net.to(device)\n if device == \"cuda\":\n cudnn.benchmark = True\n if args.distributed:\n p_net = torch.nn.parallel.DistributedDataParallel(net, [args.local_rank], output_device=args.local_rank, find_unused_parameters=True)\n else: \n if len(gpus) > 1:\n p_net = torch.nn.DataParallel(net, gpus)\n else:\n p_net = net\n\n ## Build trainer and 
train\n if cfg[\"trainer_type\"] == \"plain\":\n trainer_ = trainer.Trainer(net,p_net,[trainloader,validloader,ori_trainloader],testloader,\n savepath=savepath,\n save_every=args.save_every,\n log=logging.info, cfg=cfg[\"trainer\"])\n elif cfg[\"trainer_type\"] == \"semi\":\n if cfg[\"trainer\"][\"dataset\"] == \"cifar\":\n trainer_ = semi_trainer.SemiTrainer(net, net_, p_net, p_net_,\n trainloader, testloader,\n savepath=savepath,\n save_every=args.save_every, \n log=logging.info,\n cfg=cfg[\"trainer\"])\n if cfg[\"trainer\"][\"dataset\"] == \"svhn\":\n trainer_ = semi_trainer.SemiTrainer(net, net_, p_net, p_net_,\n [trainloader_l,trainloader_u], testloader,\n savepath=savepath,\n save_every=args.save_every, \n log=logging.info,\n cfg=cfg[\"trainer\"])\n elif cfg[\"trainer_type\"] == \"da\":\n trainer_ = da_trainer.DATrainer(net, net_, p_net, p_net_,\n trainloader, testloader,\n savepath=savepath,\n save_every=args.save_every, \n log=logging.info,\n cfg=cfg[\"trainer\"])\n\n\n trainer_.init(device=device, local_rank=args.local_rank,resume=args.resume, pretrain=args.pretrain)\n trainer_.train()\n\n # Default save for plot\n torch.save({\"net\":trainer_.net.state_dict()}, os.path.join(savepath,'ckpt_final.t7'))\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cuda.set_device", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.distributed.init_process_group", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 67, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 75, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 90, "usage_type": "call"}, 
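main() above seeds Python's random module, NumPy and PyTorch from a single value and disables cuDNN autotuning so that repeated runs are comparable. A minimal sketch of that seeding recipe in isolation (the helper name seed_everything is an assumption):

import random
import numpy as np
import torch

def seed_everything(seed: int = 2020) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Mirror the flags set in main(): trade autotuned speed for determinism.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

seed_everything(2020)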
{"api_name": "torch.cuda", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 98, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 99, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 100, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 108, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 111, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.IntTensor", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.IntTensor", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.IntTensor", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "nics_fix_pt.RangeMethod", "line_number": 138, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 155, "usage_type": "call"}, {"api_name": "datasets.cifar10", "line_number": 158, "usage_type": "call"}, {"api_name": "datasets.imagenet", "line_number": 160, "usage_type": "call"}, {"api_name": "datasets.semi_cifar10", "line_number": 163, "usage_type": "call"}, {"api_name": "datasets.semi_svhn", "line_number": 172, "usage_type": "call"}, {"api_name": "datasets.da_digits", "line_number": 204, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 215, "usage_type": "call"}, {"api_name": "fix_utils.MyNet_fix", "line_number": 224, "usage_type": "call"}, {"api_name": "fix_utils.set_fix_mode", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 242, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 244, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 247, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 247, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 253, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 253, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 258, "usage_type": "attribute"}, {"api_name": "trainer.Trainer", "line_number": 264, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 267, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 274, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 281, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 288, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path", "line_number": 296, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 299, "usage_type": "attribute"}]} +{"seq_id": "54956770", "text": "from datetime import timedelta\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.dispatch import Signal\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.signing import (\n TimestampSigner, SignatureExpired, BadSignature\n)\nfrom django.utils import timezone\n\nfrom froide.helper.redaction import can_redact_file\nfrom froide.helper.storage import HashedFilenameStorage\nfrom froide.helper.document import (\n PDF_FILETYPES, EMBEDDABLE_FILETYPES, IMAGE_FILETYPES,\n can_convert_to_pdf\n)\nfrom froide.document.models import Document\n\nfrom .message import FoiMessage\n\n\nDELETE_TIMEFRAME = timedelta(hours=36)\n\n\ndef upload_to(instance, filename):\n # name will be irrelevant\n # as hashed filename storage will drop it\n # and use only directory\n return \"%s/%s\" % (settings.FOI_MEDIA_PATH, instance.name)\n\n\nclass FoiAttachment(models.Model):\n belongs_to = models.ForeignKey(\n FoiMessage, null=True,\n verbose_name=_(\"Belongs to request\"),\n on_delete=models.CASCADE,\n related_name='foiattachment_set'\n )\n name = models.CharField(_(\"Name\"), max_length=255)\n file = models.FileField(\n _(\"File\"), upload_to=upload_to, max_length=255,\n storage=HashedFilenameStorage(),\n db_index=True\n )\n size = models.IntegerField(_(\"Size\"), blank=True, null=True)\n filetype = models.CharField(_(\"File type\"), blank=True, max_length=100)\n format = models.CharField(_(\"Format\"), blank=True, max_length=100)\n can_approve = models.BooleanField(_(\"User can approve\"), default=True)\n approved = models.BooleanField(_(\"Approved\"), default=False)\n redacted = models.ForeignKey('self', verbose_name=_(\"Redacted Version\"),\n null=True, blank=True, on_delete=models.SET_NULL,\n related_name='unredacted_set')\n is_redacted = models.BooleanField(_(\"Is redacted\"), default=False)\n converted = models.ForeignKey('self', verbose_name=_(\"Converted Version\"),\n null=True, blank=True, on_delete=models.SET_NULL,\n related_name='original_set')\n is_converted = models.BooleanField(_(\"Is converted\"), default=False)\n timestamp = models.DateTimeField(null=True, default=timezone.now)\n\n document = models.OneToOneField(\n Document, null=True, blank=True,\n 
related_name='attachment',\n on_delete=models.SET_NULL\n )\n\n attachment_published = Signal(providing_args=[])\n\n class Meta:\n ordering = ('name',)\n unique_together = ((\"belongs_to\", \"name\"),)\n # order_with_respect_to = 'belongs_to'\n verbose_name = _('Attachment')\n verbose_name_plural = _('Attachments')\n\n def __str__(self):\n return \"%s (%s) of %s\" % (self.name, self.size, self.belongs_to)\n\n def index_content(self):\n return \"\\n\".join((self.name,))\n\n def get_html_id(self):\n return _(\"attachment-%(id)d\") % {\"id\": self.id}\n\n def get_internal_url(self):\n return settings.FOI_MEDIA_URL + self.file.name\n\n def get_bytes(self):\n self.file.open(mode='rb')\n try:\n return self.file.read()\n finally:\n self.file.close()\n\n @property\n def can_redact(self):\n return can_redact_file(self.filetype, name=self.name)\n\n @property\n def can_delete(self):\n if not self.belongs_to.is_postal:\n return False\n if not self.can_approve:\n return False\n now = timezone.now()\n return self.timestamp > (now - DELETE_TIMEFRAME)\n\n @property\n def pending(self):\n return not self.file\n\n @property\n def can_edit(self):\n return self.can_redact or self.can_delete or self.can_approve\n\n @property\n def allow_link(self):\n return self.approved or not (self.can_redact and self.can_approve)\n\n @property\n def is_pdf(self):\n return self.filetype in PDF_FILETYPES or (\n self.name.endswith('.pdf') and self.filetype == 'application/octet-stream'\n )\n\n @property\n def is_image(self):\n return (\n self.filetype.startswith('image/') or\n self.filetype in IMAGE_FILETYPES or\n self.name.endswith(('.jpg', '.jpeg', '.gif', '.png'))\n )\n\n @property\n def is_mail_decoration(self):\n return self.is_image and self.size and self.size < 1024 * 60\n\n @property\n def is_irrelevant(self):\n return self.is_mail_decoration or self.is_signature\n\n @property\n def is_signature(self):\n return self.name.endswith(('.p7s', '.vcf', '.asc')) and self.size < 1024 * 15\n\n @property\n def can_embed(self):\n return self.filetype in EMBEDDABLE_FILETYPES or self.is_pdf\n\n def get_anchor_url(self):\n if self.belongs_to:\n return '%s#%s' % (self.belongs_to.request.get_absolute_url(),\n self.get_html_id())\n return '#' + self.get_html_id()\n\n def get_domain_anchor_url(self):\n return '%s%s' % (settings.SITE_URL, self.get_anchor_url())\n\n def get_absolute_url(self):\n fr = self.belongs_to.request\n return reverse(\n 'foirequest-show_attachment',\n kwargs={\n 'slug': fr.slug,\n 'message_id': self.belongs_to.pk,\n 'attachment_name': self.name\n }\n )\n\n def get_absolute_domain_url(self):\n return '%s%s' % (settings.SITE_URL, self.get_absolute_url())\n\n def get_absolute_file_url(self, authenticated=False):\n if not self.name:\n return ''\n url = reverse('foirequest-auth_message_attachment',\n kwargs={\n 'message_id': self.belongs_to_id,\n 'attachment_name': self.name\n })\n if settings.FOI_MEDIA_TOKENS and authenticated:\n signer = TimestampSigner()\n value = signer.sign(url).split(':', 1)[1]\n return '%s?token=%s' % (url, value)\n return url\n\n def get_file_url(self):\n return self.get_absolute_domain_file_url()\n\n def get_file_path(self):\n if self.file:\n return self.file.path\n return ''\n\n def get_authenticated_absolute_domain_file_url(self):\n return self.get_absolute_domain_file_url(authenticated=True)\n\n def get_absolute_domain_file_url(self, authenticated=False):\n return '%s%s' % (\n settings.FOI_MEDIA_DOMAIN,\n self.get_absolute_file_url(authenticated=authenticated)\n )\n\n def check_token(self, 
request):\n token = request.GET.get('token')\n if token is None:\n return None\n original = '%s:%s' % (self.get_absolute_file_url(), token)\n signer = TimestampSigner()\n try:\n signer.unsign(original, max_age=settings.FOI_MEDIA_TOKEN_EXPIRY)\n except SignatureExpired:\n return None\n except BadSignature:\n return False\n return True\n\n def approve_and_save(self):\n self.approved = True\n self.save()\n self.attachment_published.send(sender=self)\n\n def remove_file_and_delete(self):\n if self.file:\n other_references = FoiAttachment.objects.filter(\n file=self.file.name\n ).exclude(id=self.id).exists()\n if not other_references:\n self.file.delete(save=False)\n self.delete()\n\n def can_convert_to_pdf(self):\n ft = self.filetype.lower()\n name = self.name.lower()\n return (\n self.converted_id is None and\n can_convert_to_pdf(ft, name=name)\n )\n\n def create_document(self, title=None):\n if self.document is not None:\n return self.document\n\n if not self.is_pdf:\n return\n\n foirequest = self.belongs_to.request\n doc = Document.objects.create(\n original=self,\n user=foirequest.user,\n public=foirequest.public,\n title=title or self.name,\n foirequest=self.belongs_to.request,\n publicbody=self.belongs_to.sender_public_body\n )\n self.document = doc\n self.save()\n return doc\n", "sub_path": "froide/foirequest/models/attachment.py", "file_name": "attachment.py", "file_ext": "py", "file_size_in_byte": 8141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.settings.FOI_MEDIA_PATH", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "message.FoiMessage", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models.CASCADE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models.FileField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 43, "usage_type": "call"}, {"api_name": "froide.helper.storage.HashedFilenameStorage", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", 
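get_absolute_file_url() and check_token() above implement a sign-then-verify round trip with Django's TimestampSigner: the trailing part of the signed URL travels as a query token and is re-validated against a max_age, so leaked links expire on their own. A standalone sketch of that scheme (requires a configured SECRET_KEY; the 300-second expiry is an illustrative value, and unlike check_token() this sketch collapses the missing/invalid distinction into one boolean):

from django.core.signing import TimestampSigner, SignatureExpired, BadSignature

def make_token(url):
    # sign() yields '<url>:<timestamp>:<signature>'; ship only the part after the value.
    return TimestampSigner().sign(url).split(':', 1)[1]

def token_is_valid(url, token, max_age=300):
    try:
        TimestampSigner().unsign('%s:%s' % (url, token), max_age=max_age)
        return True
    except (SignatureExpired, BadSignature):
        return False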
"line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models.SET_NULL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models.SET_NULL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 62, "usage_type": "call"}, {"api_name": "froide.document.models.Document", "line_number": 63, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.dispatch.Signal", "line_number": 68, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 74, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 75, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 84, "usage_type": "call"}, {"api_name": "django.conf.settings.FOI_MEDIA_URL", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "froide.helper.redaction.can_redact_file", 
"line_number": 98, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 106, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 106, "usage_type": "name"}, {"api_name": "froide.helper.document.PDF_FILETYPES", "line_number": 123, "usage_type": "name"}, {"api_name": "froide.helper.document.IMAGE_FILETYPES", "line_number": 131, "usage_type": "name"}, {"api_name": "froide.helper.document.EMBEDDABLE_FILETYPES", "line_number": 149, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_URL", "line_number": 158, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 158, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 162, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_URL", "line_number": 172, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 172, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 177, "usage_type": "call"}, {"api_name": "django.conf.settings.FOI_MEDIA_TOKENS", "line_number": 182, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 182, "usage_type": "name"}, {"api_name": "django.core.signing.TimestampSigner", "line_number": 183, "usage_type": "call"}, {"api_name": "django.conf.settings.FOI_MEDIA_DOMAIN", "line_number": 201, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 201, "usage_type": "name"}, {"api_name": "django.core.signing.TimestampSigner", "line_number": 210, "usage_type": "call"}, {"api_name": "django.conf.settings.FOI_MEDIA_TOKEN_EXPIRY", "line_number": 212, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 212, "usage_type": "name"}, {"api_name": "django.core.signing.SignatureExpired", "line_number": 213, "usage_type": "name"}, {"api_name": "django.core.signing.BadSignature", "line_number": 215, "usage_type": "name"}, {"api_name": "froide.helper.document.can_convert_to_pdf", "line_number": 238, "usage_type": "call"}, {"api_name": "froide.document.models.Document.objects.create", "line_number": 249, "usage_type": "call"}, {"api_name": "froide.document.models.Document.objects", "line_number": 249, "usage_type": "attribute"}, {"api_name": "froide.document.models.Document", "line_number": 249, "usage_type": "name"}]} +{"seq_id": "368021972", "text": "import pytest\n\nfrom antidote import world\nfrom antidote._internal.world import new_container, LazyDependency\nfrom antidote.core import Container\n\n\ndef test_new_container():\n assert isinstance(new_container(), Container)\n\n\ndef test_dependency():\n with world.test.empty():\n world.test.singleton('x', object())\n d = LazyDependency('x', object)\n assert d.unwrapped == 'x'\n assert d.get() is world.get('x')\n\n class A:\n pass\n\n with world.test.empty():\n world.test.singleton('a', A())\n world.test.singleton('x', object())\n\n assert LazyDependency('a', A).get() is world.get('a')\n\n with pytest.raises(TypeError):\n LazyDependency('x', A).get()\n", "sub_path": "tests/internals/utils/test_world.py", "file_name": "test_world.py", "file_ext": "py", "file_size_in_byte": 720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "antidote.core.Container", "line_number": 9, "usage_type": "argument"}, {"api_name": "antidote._internal.world.new_container", "line_number": 9, "usage_type": "call"}, {"api_name": "antidote.world.test.empty", "line_number": 13, "usage_type": "call"}, 
{"api_name": "antidote.world.test", "line_number": 13, "usage_type": "attribute"}, {"api_name": "antidote.world", "line_number": 13, "usage_type": "name"}, {"api_name": "antidote.world.test.singleton", "line_number": 14, "usage_type": "call"}, {"api_name": "antidote.world.test", "line_number": 14, "usage_type": "attribute"}, {"api_name": "antidote.world", "line_number": 14, "usage_type": "name"}, {"api_name": "antidote._internal.world.LazyDependency", "line_number": 15, "usage_type": "call"}, {"api_name": "antidote.world.get", "line_number": 17, "usage_type": "call"}, {"api_name": "antidote.world", "line_number": 17, "usage_type": "name"}, {"api_name": "antidote.world.test.empty", "line_number": 22, "usage_type": "call"}, {"api_name": "antidote.world.test", "line_number": 22, "usage_type": "attribute"}, {"api_name": "antidote.world", "line_number": 22, "usage_type": "name"}, {"api_name": "antidote.world.test.singleton", "line_number": 23, "usage_type": "call"}, {"api_name": "antidote.world.test", "line_number": 23, "usage_type": "attribute"}, {"api_name": "antidote.world", "line_number": 23, "usage_type": "name"}, {"api_name": "antidote.world.test.singleton", "line_number": 24, "usage_type": "call"}, {"api_name": "antidote.world.test", "line_number": 24, "usage_type": "attribute"}, {"api_name": "antidote.world", "line_number": 24, "usage_type": "name"}, {"api_name": "antidote._internal.world.LazyDependency", "line_number": 26, "usage_type": "call"}, {"api_name": "antidote.world.get", "line_number": 26, "usage_type": "call"}, {"api_name": "antidote.world", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 28, "usage_type": "call"}, {"api_name": "antidote._internal.world.LazyDependency", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "654078234", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 2 10:49:06 2018\n\n@author: Arafat\n\"\"\"\n\n#=================== Imported packages & modules ==============================\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom id3 import Id3Estimator\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport sklearn.metrics as mt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#==============================================================================\n\ndef train_and_test(model, x, y):\n model.fit(x, y)\n res = model.predict(x)\n return mt.accuracy_score(res, y)\n \ndef train_tst_split(model, x, y):\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 4)\n model.fit(x_train, y_train)\n res = model.predict(x_test)\n return mt.accuracy_score(res, y_test)\n \ndef k_fold(model, x, y):\n scr = cross_val_score(model, x, y, cv=10, scoring='accuracy')\n return scr.mean()\n\ndef ploting(val, objects):\n y_pos = np.arange(len(objects))\n plt.bar(y_pos, val, align='center', alpha=0.5)\n plt.xticks(y_pos, objects)\n plt.ylabel('Accuracy percentage')\n plt.xlabel('Models')\n plt.title('Model Accuracy')\n plt.show()\n\nif __name__ == '__main__':\n file = 'first.xlsx'\n \n # Load spreadsheet\n data = pd.read_excel(file)\n \n #columns\n fcols = [\n 'competitive programming background',\n 'professional 
skill',\n 'research background',\n 'final year projct type',\n 'enthusiasm',\n 'teamwork ability',\n 'communication & network skill',\n 'cgpa'\n ]\n \n #selecting train data\n x = data[fcols]\n y = data['current job field']\n \n #initilizing the models\n lr = LogisticRegression()\n knn = KNeighborsClassifier(n_neighbors=5)\n gnb = GaussianNB()\n mn = MultinomialNB()\n ber = BernoulliNB()\n tree = DecisionTreeClassifier()\n id3 = Id3Estimator()\n rnd = RandomForestClassifier(n_estimators=300)\n svc = SVC()\n mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, \n hidden_layer_sizes=(5, 2), random_state=1)\n \n \"\"\"a = tree.fit(x,y)\n print(a.feature_importances_)\"\"\"\n val = list()\n val.append(k_fold(lr, x, y))\n val.append(k_fold(knn, x, y))\n val.append(k_fold(gnb, x, y))\n val.append(k_fold(mn, x, y))\n val.append(k_fold(ber, x, y))\n val.append(k_fold(tree, x, y))\n val.append(k_fold(id3, x, y))\n val.append(k_fold(rnd, x, y))\n val.append(k_fold(svc, x, y))\n val.append(k_fold(mlp, x, y))\n\n #k fold cross validation score\n print('k-fold')\n print('Logistic Regression accuracy score:', val[0])\n print('knn accuracy score:', val[1])\n print('Gaussian Naive Bayes accuracy score:', val[2])\n print('Multinominal Naive Bayes accuracy score:', val[3])\n print('Bernoulli Naive Bayes accuracy score:', val[4])\n print('Decision Tree(CART) accuracy score:', val[5])\n print('Decision Tree(ID3) accuracy score:', val[6])\n print('Random Forest accuracy score:', val[7])\n print('Support Vector Machine accuracy score:', val[8])\n print('MLP Neural Network accuracy score:', val[9])\n \n objects = ('LR','KNN','GNB','MNB','BNB','CART','ID3', 'RF','SVM', 'MLP')\n val2 = [i*100 for i in val]\n ploting(val2, objects)\n \n ", "sub_path": "codes/RFvsCART.py", "file_name": "RFvsCART.py", "file_ext": "py", "file_size_in_byte": 3668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.metrics.accuracy_score", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 29, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 35, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 54, 
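Every model above is scored through k_fold(), i.e. one cross_val_score call with cv=10 whose fold accuracies are averaged into a single number. A condensed sketch of the same comparison loop on synthetic data (the toy dataset is only for illustration):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=200, n_features=8, random_state=4)
for name, model in {'LR': LogisticRegression(max_iter=1000),
                    'CART': DecisionTreeClassifier()}.items():
    # Mean accuracy over 10 folds, matching what k_fold() computes above.
    print(name, cross_val_score(model, X, y, cv=10, scoring='accuracy').mean())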
"usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.BernoulliNB", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 78, "usage_type": "call"}, {"api_name": "id3.Id3Estimator", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "197723752", "text": "from datetime import datetime\nimport gviz_api\n\nclass DataConverter:\n\n @staticmethod\n def round_if_float(value):\n return round(value,2) if isinstance(value, float) else value\n\n @staticmethod\n def convert_uv_value(val):\n return {'date': datetime.strptime(val['Date'], '%Y-%m-%d %H:%M'),\n 'forecast': DataConverter.round_if_float(val['Forecast']),\n 'forecast_annotation': None,\n 'forecast_annotation_text': None,\n 'measured': DataConverter.round_if_float(val['Measured']),\n 'measured_annotation': None,\n 'measured_annotation_text': None,\n 'low': 3,\n 'low_tooltip': 'Low',\n 'medium': 3,\n 'medium_tooltip': 'Medium',\n 'high': 2,\n 'high_tooltip': 'High',\n 'very_high': 3,\n 'very_high_tooltip': 'Very High',\n 'extreme': 4,\n 'extreme_tooltip': 'Extreme'}\n\n def convert_uv_data(self, data):\n\n description = {\"date\": (\"datetime\", \"Time\", {\"role\": \"domain\"}),\n \"forecast\": (\"number\", \"Forecast\"),\n \"forecast_annotation\": ('string', '', {\"role\": \"annotation\"}),\n \"forecast_annotation_text\": ('string', '', {\"role\": \"annotationText\"}),\n \"measured\": (\"number\", \"Measured\"),\n \"measured_annotation\": ('string', '', {\"role\": \"annotation\"}),\n \"measured_annotation_text\": ('string', '', {\"role\": \"annotationText\"}),\n \"low\": (\"number\", \"Low\"),\n \"low_tooltip\": (\"string\", \"Low\", {\"role\": \"tooltip\"}),\n \"medium\": (\"number\", \"Medium\"),\n \"medium_tooltip\": (\"string\", \"Medium\", {\"role\": \"tooltip\"}),\n \"high\": (\"number\", \"High\"),\n \"high_tooltip\": (\"string\", \"High\", {\"role\": \"tooltip\"}),\n \"very_high\": (\"number\", \"Very High\"),\n \"very_high_tooltip\": (\"string\", \"Very High\", {\"role\": \"tooltip\"}),\n \"extreme\": (\"number\", \"Extreme\"),\n \"extreme_tooltip\": (\"string\", \"Extreme\", {\"role\": \"tooltip\"}),\n }\n\n data = list(map(DataConverter.convert_uv_value, data['GraphData']))\n\n for e in reversed(data):\n if e['measured'] is not None:\n e['measured_annotation'] = \"%.2f @ %s\" % (e['measured'], datetime.strftime(e['date'], '%H:%M'))\n break\n\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n\n return data_table.ToJSon(columns_order=['date', 'low', 'low_tooltip',\n 'medium', 'medium_tooltip',\n 'high', 'high_tooltip',\n 'very_high', 'very_high_tooltip',\n 'extreme', 'extreme_tooltip',\n 'forecast', 'forecast_annotation',\n \"forecast_annotation_text\",\n 'measured','measured_annotation',\n 'measured_annotation_text'])\n\n\n\n#\n# res = DataConverter().convert_uv_data(open('../tests/fixtures/uv_data.json'))\n# 
import code; code.interact(local=dict(globals(), **locals()))\n", "sub_path": "uvindx_info/data_converter.py", "file_name": "data_converter.py", "file_ext": "py", "file_size_in_byte": 3489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "name"}, {"api_name": "gviz_api.DataTable", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "317042223", "text": "import asyncio\nimport discord\nfrom discord.ext import commands\n\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nimport sys,os\n\nimport re\nimport json\n\nimport colorama\nfrom colorama import Fore, Back, Style\ncolorama.init()\n\nfrom utils import data\nfrom utils import imaging\nfrom utils import funcs\nfrom utils import checks\n\n#from apscheduler.schedulers.asyncio import AsyncIOScheduler\n\nmodules = [ #What \"cogs\" to load up\n\t\"mods.management\",\n\t\"mods.fun\",\n\t\"mods.misc\",\n\t\"mods.owner\",\n\t\"mods.nsfw\",\n\t\"mods.image\",\n\t\"mods.admin\",\n\t\"mods.face\"\n]\n\nif os.path.isfile(\"mods/mrmeme.py\"):\n\tmodules.append(\"mods.mrmeme\")\n\nclass Object(object): pass\n\ndef get_responses(): #Get all the responses for different personalities and reconstruct any missing responses.\n\twith open(\"messages.json\",\"r\") as f:\n\t\tresponses = json.loads(f.read())\n\tdefaults = responses.pop('normal',None)\n\tnon_defaults = responses\n\tfor nd in non_defaults: #Go through all message sets, except for the default one.\n\t\tfor emoji in defaults['emoji']:\n\t\t\tif not emoji in responses[nd]['emoji']:\n\t\t\t\tresponses[nd]['emoji'][emoji] = defaults['emoji'][emoji]\n\t\tfor msg in defaults['global']:\n\t\t\tif not msg in responses[nd]['global']: #If the current non_default doesn't have the global message, add it.\n\t\t\t\tresponses[nd]['global'][msg] = defaults['global'][msg]\n\t\tfor group in defaults['commands']: #Go through all the command message groups.\n\t\t\tif not group in responses[nd]['commands']: #If the command group is missing, add it from defaults.\n\t\t\t\tresponses[nd]['commands'][group] = defaults['commands'][group]\n\t\t\t\tcontinue\n\t\t\tif group in responses[nd]['commands']: #If the command group exists, look for missing messages and add them.\n\t\t\t\tfor msg in defaults['commands'][group]:\n\t\t\t\t\tif not msg in responses[nd]['commands'][group]:\n\t\t\t\t\t\tresponses[nd]['commands'][group][msg] = defaults['commands'][group][msg]\n\tresponses['normal'] = defaults\n\t#resolve emoji's\n\t\"\"\"for pname, personality in responses.items():\n\t\tprint(pname)\n\t\temoji = responses[pname]['emoji']\n\t\temojis = emoji.keys()\n\t\tfor msgkey, globalmsg in personality['global'].items():\n\t\t\tfor key in emojis:\n\t\t\t\tif globalmsg.find(\":\"+key+\":\") > -1:\n\t\t\t\t\ts = globalmsg.replace(\":\"+key+\":\",emoji[key])\n\t\t\t\t\tresponses[pname]['global'][msgkey] = s\n\t\t\t\t\t#print(\"replacing: responses['\" + pname + \"']['globals']['\"+msgkey+\"'] -> \"+s)\n\t\t\t\t\t#print(pname,s)\n\t\tfor cmdkey, cmdmsgset in personality['commands'].items():\n\t\t\tfor cmdname, cmdmsg in cmdmsgset.items():\n\t\t\t\t#print(cmdmsg)\n\t\t\t\tfor key in emojis:\n\t\t\t\t\tif 
cmdmsg.find(\":\"+key+\":\") > -1:\n\t\t\t\t\t\ts = cmdmsg.replace(\":\"+key+\":\",emoji[key])\n\t\t\t\t\t\tprint(s,pname,cmdkey,cmdname)\n\t\t\t\t\t\tresponses[pname]['commands'][cmdkey][cmdname] = s\n\t\t\t\t\t\t#print(\"replacing: responses['\" + pname + \"']['commands']['\"+cmdkey+\"'] -> \"+s)\"\"\"\n\treturn responses\n\n\n\ndef setup_funcs(bot): #Initialize any variables and systems that we need later.\n\t#if bot.dev_mode is True:\n\t#\tbot.db_name = bot.db_name + \"_dev\"\n\tdb_user = 'claribot0'\n\t#Set up database stuff\n\tglobal session\n\tengine = create_engine('mysql+pymysql://{0}:{1}@{2}/{3}?charset=utf8mb4'.format(bot.db_username,bot.db_pass,bot.db_ip,bot.db_name),isolation_level=\"READ COMMITTED\")\n\tsession = sessionmaker(bind=engine)\n\tbot.mysql = Object()\n\tbot.mysql.engine = engine\n\tbot.mysql.cursor = bot.get_cursor\n\t#Setup functions and data.\n\tbot.data = data.Data(bot)\n\tbot.imaging = imaging.ImageManip()\n\tbot.funcs = funcs.Funcs(bot)\n\tbot.responses = get_responses()\n\tbot.AdvChecks = checks.AdvChecks(bot) #this is a bit of a cheat/hack to pass the bot instance to the checks.py file, but it works!\n\tbot.remove_command(\"help\") #We will replace it with our own help command later\n\tbot.fatokens = ()\n\nclass Claribot(commands.AutoShardedBot):\n\n\tdef __init__(self,*args,**kwargs):\n\t\t#Deal with asyncio platform differences\n\t\tif sys.platform == \"win32\":\n\t\t\tself.loop = kwargs.pop('loop', asyncio.ProactorEventLoop())\n\t\t\tasyncio.set_event_loop(self.loop)\n\t\telse:\n\t\t\tself.loop = kwargs.pop('loop', asyncio.get_event_loop())\n\t\t\tasyncio.get_child_watcher().attach_loop(self.loop)\n\t\tcommand_prefix = kwargs.pop('commandPrefix', commands.when_mentioned_or('$'))\n\n\t\tintents = discord.Intents.default()\n\t\t#Unvalidated input is bad...\n\t\tintents.presences = kwargs.pop('enable_presences', False)\t\n\t\tintents.members = kwargs.pop('enable_members', False)\n\n\t\t#Initialize the bot with all the parameters\n\t\tsuper().__init__(command_prefix=command_prefix, intents=intents, *args, **kwargs)\n\t\t#Deal with variables\n\t\tself.token = kwargs.pop('token')\n\t\tself.owner = None #will be automatically retrieved later\n\t\tself.dev_mode = kwargs.pop('dev_mode',False)\n\t\tself.db_pass = kwargs.pop('dbPass')\n\t\tself.db_ip = kwargs.pop('db_ip','localhost')\n\t\tself.db_name = kwargs.pop('db_name','claribot')\n\t\tself.db_username = kwargs.pop('db_username','claribot_user')\n\t\tself.ms_face_key = kwargs.pop('ms_face_key', '')\n\n\n\t\"\"\"\n\tasync def command_help(self,ctx): #Format help for a command if needed\n\t\tif ctx.invoked_subcommand:\n\t\t\tcmd = ctx.invoked_subcommand\n\t\telse:\n\t\t\tcmd = ctx.command\n\t\tpages = await self.formatter.format_help_for(ctx, cmd)\n\t\tfor page in pages:\n\t\t\tawait ctx.message.channel.send(page.replace(\"\\n\", \"fix\\n\", 1))\n\t\"\"\"\n\n\tasync def on_ready(self): #When the bot has logged in and is ready\n\t\tsetup_funcs(self) #Initialize functions\n\t\tfor cog in modules: #Load cogs\n\t\t\ttry:\n\t\t\t\tself.load_extension(cog)\n\t\t\texcept Exception as e:\n\t\t\t\tmsg = Fore.RED + '======COG ERROR======\\nModule: {0}\\n{1}: {2}\\n====================='.format(cog,type(e).__name__,e)\n\t\t\t\tprint(msg)\n\t\tplaying = self.data.DB.get_bot_setting('playing')\n\t\tif not playing:\n\t\t\tplaying = \"Database Errors\" #If there was an error getting the playing status, use this instead\n\t\ttokens = self.data.DB.get_bot_setting('fatokens')\n\t\tif tokens:\n\t\t\tself.fatokens = 
tokens.split(\";\")\n\t\tout = Fore.GREEN + \"------\\n{0}\\n{1}\\nPlaying: {2}\\nDeveloper Mode: {3}\\n------\".format(self.user,(\"Shard: {0}/{1}\".format(self.shard_id,self.shard_count-1)) if self.shard_id is not None else \"Shard: ==AUTO SHARDED==\",playing,\"TRUE\" if self.dev_mode else \"FALSE\") + Style.RESET_ALL\n\t\tprint(out)\n\t\tawait self.change_presence(activity=discord.Game(name=playing)) #Set playing status\n\n\tasync def on_message(self,message): #Triggers whenever a message is sent.\n\t\tawait self.wait_until_ready()\n\t\tif self.owner is None:\n\t\t\tapp_info = await self.application_info()\n\t\t\tself.owner = app_info.owner\n\t\tif (self.dev_mode and message.author != self.owner) or message.author.bot:\n\t\t\treturn\n\t\tprefix = self.data.DB.get_prefix(message=message) #Get the server's prefix\n\t\thandle_owo = True\n\t\tmentioned = message.content.startswith(self.user.mention)\n\t\tif (message.content.lower().startswith(prefix) or mentioned) and message.content.lower() != prefix: #Get if the message starts with the bot's mention or the guilds prefix\n\t\t\tif mentioned:\n\t\t\t\tmessage.content = \"$\" + message.content[len(self.user.mention):].strip()\n\t\t\t\tprefix = \"$\"\n\t\t\tcontext = await self.funcs.overides.get_context(message,prefix)\n\t\t\tblacklisted = self.funcs.main.is_blacklisted(guild=message.guild,message=message,command=context.command)\n\t\t\tif context.command:\n\t\t\t\tif context.command.name in (\"owo\",\"uwu\"):\n\t\t\t\t\thandle_owo = False\n\t\t\tif blacklisted:\n\t\t\t\treturn\n\t\t\tawait self.funcs.overides.process_commands(message,prefix)\n\t\telse:\n\t\t\tdads = re.findall(r\"\\bI'?\\s*a?m\\s([^.|?|!]+)\",message.content,re.IGNORECASE)\n\t\t\tif len(dads) >= 1:\n\t\t\t\tdad = dads[0]\n\t\t\t\tif self.data.DB.get_serveropt(message.guild,\"dad_mode\",default=False,errors=False):\n\t\t\t\t\tif len(dad) > 1900:\n\t\t\t\t\t\tdad = \"Mr. 
Long Message\"\n\t\t\t\t\tawait message.channel.send(\"Hi \\\"{0}\\\", I'm {1}.\".format(dad,message.guild.me.display_name))\n\t\tif handle_owo:\n\t\t\towo_success = self.funcs.main.handle_owo(message)\n\n\tasync def on_command_completion(self,ctx):\n\t\tif ctx.command.root_parent:\n\t\t\tself.data.counters.update_command_usage(ctx.command.root_parent.name+\"_\"+ctx.command.name)\n\t\telse:\n\t\t\tself.data.counters.update_command_usage(ctx.command.name)\n\n\tasync def on_command_error(self,ctx,e): #If a command errors out, error names explain it all.\n\t\t#print(\"Command Error ({0}): `{1}`\".format(type(e).__name__,e))\n\t\tif isinstance(e, commands.CommandNotFound):\n\t\t\treturn\n\t\telif isinstance(e, commands.CommandOnCooldown):\n\t\t\ts = e.retry_after\n\t\t\th, rm = divmod(s,3600)\n\t\t\tm, seconds = divmod(rm,60)\n\t\t\th,m = (round(h),round(m))\n\t\t\tm1 = m2 = \"\"\n\t\t\tif h > 0:\n\t\t\t\tm1 = \"{0}h\".format(h)\n\t\t\tif m > 0:\n\t\t\t\tm2 = \"{0}h\".format(m)\n\t\t\tafter = \"{0} {1} {2}s\".format(m1,m2,round(seconds,1))\n\t\t\tafter = after.strip()\n\t\t\tawait ctx.send(ctx.gresponses['cooldown'].format(after))\n\t\t\treturn\n\t\telif isinstance(e, discord.errors.Forbidden):\n\t\t\tawait ctx.send(ctx.gresponses['no_perms'])\n\t\telif isinstance(e, checks.No_NSFW):\n\t\t\tawait ctx.send(ctx.gresponses['no_nsfw'])\n\t\telif isinstance(e, checks.No_Admin):\n\t\t\tawait ctx.send(ctx.gresponses['no_admin'])\n\t\telif isinstance(e, checks.No_Mod):\n\t\t\tawait ctx.send(ctx.gresponses['no_mod'])\n\t\telif isinstance(e, checks.No_Special):\n\t\t\tawait ctx.send(ctx.gresponses['no_special'])\n\t\telif isinstance(e, checks.NSFW_Disabled):\n\t\t\tawait ctx.send(ctx.gresponses['nsfw_disabled'])\n\t\telif isinstance(e, checks.No_BotOwner):\n\t\t\tawait ctx.send(ctx.gresponses['no_bot_owner'])\n\t\telif isinstance(e, commands.BotMissingPermissions):\n\t\t\tawait ctx.send(ctx.gresponses['bot_missing_perms'].format(', '.join(e.missing_perms)))\n\t\telif isinstance(e, commands.NoPrivateMessage):\n\t\t\tawait ctx.send(ctx.gresponses['guild_only'])\n\t\telif isinstance(e, commands.MissingRequiredArgument) or isinstance(e, commands.BadArgument):\n\t\t\tawait ctx.send_help(ctx.command)\n\t\telif isinstance(e, checks.Not_E):\n\t\t\tpass\n\t\telse:\n\t\t\t#print(\"Command Error ({0}): `{1}`\".format(type(e).__name__,e))\n\t\t\t#await ctx.send(\"Command Error ({0}): `{1}`\".format(type(e).__name__,e))\n\t\t\tawait self.funcs.command.handle_error(ctx,e)\n\t\tctx.command.reset_cooldown(ctx)\n\n\tasync def on_guild_join(self,guild):\n\t\tchannel_names = [\"entry\",\"general\",\"general-chat\",\"general2\",\"general-2\",\"general-chat-2\"]\n\t\texclude = [\"rules\",\"announcements\",\"major-announcements\",\"important-announcements\"]\n\t\tmessage = \"*Hi there!* :wave:\\n\\nI'm Claribot, and I'm here to spice your server up a little bit.\\nI hope that you'll find good uses for my various features.\\n\\nYou can view my command list with **$help**\"\n\t\tfor channel in guild.channels:\n\t\t\tif channel.name in channel_names and not channel.name in exclude:\n\t\t\t\tawait channel.send(message)\n\t\t\t\treturn\n\t\tfor channel in guild.channels:\n\t\t\tif not channel.name in exclude:\n\t\t\t\tawait channel.send(message)\n\t\t\t\treturn\n\n\t@property\n\tdef get_cursor(self): #DB stuff\n\t\treturn session()\n\n\tdef run(self): #Actually run the bot\n\t\tsuper().run(self.token)\n\n\tdef die(self): #Gracefully shut the bot 
down\n\t\ttry:\n\t\t\tself.loop.stop()\n\t\t\tself.mysql.cursor.close_all()\n\t\t\tself.mysql.engine.dispose()\n\t\t\ttasks = asyncio.gather(*asyncio.Task.all_tasks(), loop=self.loop)\n\t\t\ttasks.cancel()\n\t\t\tself.loop.run_forever()\n\t\t\ttasks.exception()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 11024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "colorama.init", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 93, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.data.Data", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.data", "line_number": 99, "usage_type": "name"}, {"api_name": "utils.imaging.ImageManip", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.imaging", "line_number": 100, "usage_type": "name"}, {"api_name": "utils.funcs.Funcs", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.funcs", "line_number": 101, "usage_type": "name"}, {"api_name": "utils.checks.AdvChecks", "line_number": 103, "usage_type": "call"}, {"api_name": "utils.checks", "line_number": 103, "usage_type": "name"}, {"api_name": "discord.ext.commands.AutoShardedBot", "line_number": 107, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 107, "usage_type": "name"}, {"api_name": "sys.platform", "line_number": 111, "usage_type": "attribute"}, {"api_name": "asyncio.ProactorEventLoop", "line_number": 112, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop", "line_number": 113, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 115, "usage_type": "call"}, {"api_name": "asyncio.get_child_watcher", "line_number": 116, "usage_type": "call"}, {"api_name": "discord.ext.commands.when_mentioned_or", "line_number": 117, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 117, "usage_type": "name"}, {"api_name": "discord.Intents.default", "line_number": 119, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 119, "usage_type": "attribute"}, {"api_name": "colorama.Fore.RED", "line_number": 154, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 154, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 162, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 162, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 162, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 162, "usage_type": "name"}, {"api_name": "discord.Game", "line_number": 164, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 189, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 189, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.CommandNotFound", "line_number": 207, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 207, "usage_type": "name"}, {"api_name": "discord.ext.commands.CommandOnCooldown", "line_number": 209, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 209, "usage_type": 
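The CommandOnCooldown branch above compresses retry_after seconds into an 'Xh Ym Zs' string with two divmod passes. The same arithmetic isolated into a helper (the name format_cooldown is an assumption):

def format_cooldown(seconds):
    hours, rest = divmod(seconds, 3600)
    minutes, secs = divmod(rest, 60)
    parts = []
    if hours >= 1:
        parts.append('{0}h'.format(round(hours)))
    if minutes >= 1:
        parts.append('{0}m'.format(round(minutes)))
    parts.append('{0}s'.format(round(secs, 1)))
    return ' '.join(parts)

print(format_cooldown(3725.4))  # -> '1h 2m 5.4s'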
"name"}, {"api_name": "discord.errors", "line_number": 223, "usage_type": "attribute"}, {"api_name": "utils.checks.No_NSFW", "line_number": 225, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 225, "usage_type": "name"}, {"api_name": "utils.checks.No_Admin", "line_number": 227, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 227, "usage_type": "name"}, {"api_name": "utils.checks.No_Mod", "line_number": 229, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 229, "usage_type": "name"}, {"api_name": "utils.checks.No_Special", "line_number": 231, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 231, "usage_type": "name"}, {"api_name": "utils.checks.NSFW_Disabled", "line_number": 233, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 233, "usage_type": "name"}, {"api_name": "utils.checks.No_BotOwner", "line_number": 235, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 235, "usage_type": "name"}, {"api_name": "discord.ext.commands.BotMissingPermissions", "line_number": 237, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 237, "usage_type": "name"}, {"api_name": "discord.ext.commands.NoPrivateMessage", "line_number": 239, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 239, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 241, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 241, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 241, "usage_type": "attribute"}, {"api_name": "utils.checks.Not_E", "line_number": 243, "usage_type": "attribute"}, {"api_name": "utils.checks", "line_number": 243, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 276, "usage_type": "call"}, {"api_name": "asyncio.Task.all_tasks", "line_number": 276, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 276, "usage_type": "attribute"}]} +{"seq_id": "202875053", "text": "import sys\nimport pygame\nfrom pygame.locals import * # Siin asuvad konstandid nagu QUIT, MOUSEMOTION, KEYDOWN...\nimport random\nimport time\n\n###################################################### KONSTANDID ######################################################\n\nNIMI = \"Sirtet\"\nFPS = 25\nAKNA_LAIUS = 640\nAKNA_KÕRGUS = 480\nRUUDU_SUURUS = 20\nRUUDUSTIKU_LAIUS = 10\nRUUDUSTIKU_KÕRGUS = 20\nRAAMI_PAKSUS = 10 # Raam eraldab ruudustikku (mängulauda) muust.\nX_VARU = int((AKNA_LAIUS - RUUDUSTIKU_LAIUS * RUUDU_SUURUS) / 2)\nY_VARU = int((AKNA_KÕRGUS - RUUDUSTIKU_KÕRGUS * RUUDU_SUURUS) - RAAMI_PAKSUS) # X ja Y varu - mängulaua joonistamiseks\nTÜHI = '.'\nKÕRVALELIIKUMISE_SAGEDUS = 0.2 # Kui kasutaja hoiab vastavat nuppu all, liigub klots iga 0,2 s jooksul ühe ruudu võrra.\nKUKKUMISE_SAGEDUS = 0.1 # Kui kasutaja hoiab \"nool alla\" nuppu, liigub klots iga 0,1 s ühe ruudu võrra allapoole.\n\n# värvid\nMUST = (0,0,0)\nVALGE = (255,255,255)\nPUNANE = (219,51,51)\nROHELINE = (49,204,90)\nSININE = (36,119,234)\nKOLLANE = (229,231,53)\nTAUSTAVÄRV = (3,8,32)\nRAAMI_VÄRV = (8,20,81)\nTEKSTI_VÄRV = (246,250,255)\n\nKLOTSIDE_VÄRVID = (PUNANE, ROHELINE, SININE, KOLLANE)\n\n# erinevad klotsid\nMALLI_LAIUS = 5\nMALLI_KÕRGUS = 5 # Üks klots on list, milles on viis elementi. 
Nendeks elementideks on sõned pikkusega 5.\n\n# Iga kujuga klotsi jaoks on list, mis sisaldab omakorda listina kõiki selle võimalikke paiknevusi.\n# I_MALL[0][0][2] == \"X\" <-- See tuleks True. Klotse lauale lisades ja joonistamisel saab seda kasutada.\nI_MALL = \\\n[['..X..',\n '..X..',\n '..X..',\n '..X..',\n '.....'],\n ['.....',\n '.....',\n 'XXXX.',\n '.....',\n '.....']]\n\nL_MALL = \\\n[['.....',\n '...X.',\n '.XXX.',\n '.....',\n '.....'],\n ['.....',\n '..X..',\n '..X..',\n '..XX.',\n '.....'],\n ['.....',\n '.....',\n '.XXX.',\n '.X...',\n '.....'],\n ['.....',\n '.XX..',\n '..X..',\n '..X..',\n '.....']]\n\nJ_MALL = \\\n[['.....',\n '.X...',\n '.XXX.',\n '.....',\n '.....'],\n ['.....',\n '..XX.',\n '..X..',\n '..X..',\n '.....'],\n ['.....',\n '.....',\n '.XXX.',\n '...X.',\n '.....'],\n ['.....',\n '..X..',\n '..X..',\n '.XX..',\n '.....']]\n\nO_MALL = \\\n[['.....',\n '.....',\n '.XX..',\n '.XX..',\n '.....']]\n\nS_MALL = \\\n[['.....',\n '.....',\n '..XX.',\n '.XX..',\n '.....'],\n ['.....',\n '..X..',\n '..XX.',\n '...X.',\n '.....']]\n\nT_MALL = \\\n[['.....',\n '..X..',\n '.XXX.',\n '.....',\n '.....'],\n ['.....',\n '..X..',\n '..XX.',\n '..X..',\n '.....'],\n ['.....',\n '.....',\n '.XXX.',\n '..X..',\n '.....'],\n ['.....',\n '..X..',\n '.XX..',\n '..X..',\n '.....']]\n\nZ_MALL = \\\n[['.....',\n '.....',\n '.XX..',\n '..XX.',\n '.....'],\n ['.....',\n '..X..',\n '.XX..',\n '.X...',\n '.....']]\n\n# Sõnastik KLOTSID sisaldab kõiki klotse ja nende asendeid. \"X\" sõnes tähendab täidetud ruutu ja \".\" tühja ruutu.\nKLOTSID = {\"I\": I_MALL, \"L\": L_MALL, \"J\": J_MALL, \"O\": O_MALL, \"S\": S_MALL, \"T\": T_MALL, \"Z\": Z_MALL}\n\n##################################################### FUNKTSIOONID #####################################################\n\ndef start():\n \"\"\"Alustab Pygame'i ja näitab startscreen'i. Defineerib tähtsad mõned tähtsad konstandid.\"\"\"\n pygame.init()\n global KELL, LÕUEND, VÄIKE_FONT, SUUR_FONT\n KELL = pygame.time.Clock()\n LÕUEND = pygame.display.set_mode((AKNA_LAIUS, AKNA_KÕRGUS))\n VÄIKE_FONT = pygame.font.Font(None, 30)\n SUUR_FONT = pygame.font.Font(None, 50)\n pygame.display.set_caption(NIMI)\n pygame.mixer.music.load(\"taustamuusika.ogg\")\n kuva_tekst(NIMI)\n while True:\n pygame.mixer.music.play(loops=10)\n alusta_mänguga()\n kuva_tekst(\"Mäng läbi! 
:-(\")\n\ndef alusta_mänguga():\n \"\"\"Seab vaikeväärtused ja sisaldab endas game loop'i.\"\"\"\n skoor = 0\n ruudustik = loo_tühi_ruudustik()\n alla_liikumine = False\n vasakule_liikumine = False\n paremale_liikumine = False\n viimane_allaliikumise_aeg = time.time() # Neid kolme läheb tingimuslausetes vaja.\n viimane_kõrvaleliikumise_aeg = time.time()\n viimane_kukkumise_aeg = time.time()\n level, kukkumise_sagedus = arvuta_level_ja_kukkumise_sagedus(skoor)\n kukkuv_klots = genereeri_uus_klots()\n järgmine_klots = genereeri_uus_klots() # Mängu alustades tuleb mõlemad gen-ida.\n\n while True: # GAME LOOP!\n if kukkuv_klots == None: # kukkuv_klots saab väärtuseks None, kui põhja kukkus.\n kukkuv_klots = järgmine_klots # Kukkuv klots vahetub.\n järgmine_klots = genereeri_uus_klots() # Gen-ime uue.\n viimane_kukkumise_aeg = time.time() # Reset\n\n if not ruudustik_vaba(ruudustik, kukkuv_klots):\n return # Lähme tagasi start()-is asuvasse tsüklisse, mäng on läbi.\n\n kontrolli_katkestust() # Ega ei taheta lõpetada?\n\n for event in pygame.event.get():\n # Teeme KEYUP ja KEYDOWN eraldi, et nuppu all hoides liikumine jätkuks.\n if event.type == KEYUP:\n if (event.key == K_p):\n # Vajutades \"p\", paneme mängu pausi peale.\n LÕUEND.fill(TAUSTAVÄRV)\n kuva_tekst(\"Jätkamiseks vajuta mõnd nuppu.\")\n viimane_kõrvaleliikumise_aeg = time.time()\n viimane_allaliikumise_aeg = time.time()\n viimane_kukkumise_aeg = time.time()\n elif event.key == K_LEFT:\n vasakule_liikumine = False\n elif event.key == K_RIGHT:\n paremale_liikumine = False\n elif event.key == K_DOWN:\n alla_liikumine = False\n\n elif event.type == KEYDOWN:\n # Kontrollime, et vasakule liikumine on võimalik. Liigutame vasakule lahutades 1.\n if event.key == K_LEFT and ruudustik_vaba(ruudustik, kukkuv_klots, X=-1):\n kukkuv_klots[\"x\"] -= 1\n vasakule_liikumine = True\n paremale_liikumine = False # Et me ei liiguks samas paremale...\n viimane_kõrvaleliikumise_aeg = time.time()\n elif event.key == K_RIGHT and ruudustik_vaba(ruudustik, kukkuv_klots, X=1):\n kukkuv_klots[\"x\"] += 1\n paremale_liikumine = True\n vasakule_liikumine = False\n viimane_kõrvaleliikumise_aeg = time.time()\n elif event.key == K_x or event.key == K_UP:\n kukkuv_klots[\"paiknevus\"] = (kukkuv_klots[\"paiknevus\"] + 1) % len(KLOTSID[kukkuv_klots[\"kuju\"]])\n # Teeme jäägiga jagamise. 
Kui esimene pool saab suuremaks kui üldse klotside erinevaid paiknemisi\n # on, saame % abil ühe.\n if not ruudustik_vaba(ruudustik, kukkuv_klots):\n # Kui midagi on ees.\n kukkuv_klots[\"paiknevus\"] = (kukkuv_klots[\"paiknevus\"] - 1) % len(KLOTSID[kukkuv_klots[\"kuju\"]])\n elif event.key == K_z: # vasakule\n kukkuv_klots[\"paiknevus\"] = (kukkuv_klots[\"paiknevus\"] - 1) % len(KLOTSID[kukkuv_klots[\"kuju\"]])\n if not ruudustik_vaba(ruudustik, kukkuv_klots):\n kukkuv_klots[\"paiknevus\"] = (kukkuv_klots[\"paiknevus\"] + 1) % len(KLOTSID[kukkuv_klots[\"kuju\"]])\n elif event.key == K_DOWN:\n # soft drop\n alla_liikumine = True\n if ruudustik_vaba(ruudustik, kukkuv_klots, Y=1):\n kukkuv_klots[\"y\"] += 1\n viimane_allaliikumise_aeg = time.time()\n elif event.key == K_SPACE:\n # hard drop\n alla_liikumine = False\n paremale_liikumine = False\n vasakule_liikumine = False\n for i in range(1, RUUDUSTIKU_KÕRGUS):\n if not ruudustik_vaba(ruudustik, kukkuv_klots, Y=i):\n break\n kukkuv_klots[\"y\"] += i - 1\n\n # liikumine, kui parem/vasak nuppu hoitakse all\n # NB: 'or' is needed here - a (bool, bool) tuple is always truthy, even when both flags are False.\n if (vasakule_liikumine or paremale_liikumine) and time.time() - viimane_kõrvaleliikumise_aeg > KÕRVALELIIKUMISE_SAGEDUS:\n if vasakule_liikumine and ruudustik_vaba(ruudustik, kukkuv_klots, X=-1):\n kukkuv_klots[\"x\"] -= 1\n elif paremale_liikumine and ruudustik_vaba(ruudustik, kukkuv_klots, X=1):\n kukkuv_klots[\"x\"] += 1\n viimane_kõrvaleliikumise_aeg = time.time()\n if alla_liikumine and time.time() - viimane_allaliikumise_aeg > KUKKUMISE_SAGEDUS and ruudustik_vaba(ruudustik, kukkuv_klots, Y=1):\n kukkuv_klots[\"y\"] += 1\n viimane_allaliikumise_aeg = time.time()\n\n # loomulik kukkumine\n if time.time() - viimane_kukkumise_aeg > kukkumise_sagedus:\n # Kas põhja kukkunud?\n if not ruudustik_vaba(ruudustik, kukkuv_klots, Y=1):\n # Kukkus põhja seega.\n lisa_ruudustikku(ruudustik, kukkuv_klots)\n skoor += eemalda_täis_rida(ruudustik)\n level, kukkumise_sagedus = arvuta_level_ja_kukkumise_sagedus(skoor)\n kukkuv_klots = None\n else:\n # Ei ole veel põhjas, liigume edasi.\n kukkuv_klots[\"y\"] += 1\n viimane_kukkumise_aeg = time.time()\n\n # JOONISTAME KÕIK\n LÕUEND.fill(TAUSTAVÄRV)\n joonista_ruudustik(ruudustik)\n joonista_statid(skoor, level)\n joonista_järgmine_klots(järgmine_klots)\n if kukkuv_klots != None:\n joonista_klots(kukkuv_klots)\n\n pygame.display.update()\n KELL.tick(FPS)\n\ndef asub_ruudustikul(x, y):\n \"\"\"Tagastab True, kui punkt asub ruudustikul. Vastasel juhul tagastab False.\"\"\"\n return x >= 0 and x < RUUDUSTIKU_LAIUS and y < RUUDUSTIKU_KÕRGUS\n\ndef ruudustik_vaba(ruudustik, klots, X=0, Y=0):\n \"\"\"Tagastab True, kui klots asub ruudustikul ja ta ei kattu olemasolevate täidetud ruutudega.\n Parameetriteks on ruudustiku list ja klotsi list (5x5 mõõtmetega).\n Kontrollime, kas mõni klotsi ruut kattub mängulaual (ruudustikul) juba asuva\n täidetud ruuduga. 
Klotsi andmeid sisaldavale 5x5 kasti\n ülemisele vasakpoolsele ruudule, mille asukohta mängulaual teame (koordinaadid (X,Y)), liidame\n kontrollitava täidetud ruudu vastava x/y koordinaadi.\"\"\"\n for x in range(MALLI_LAIUS):\n for y in range(MALLI_KÕRGUS):\n asub_ülevalpool_ruudustikku = (y + klots[\"y\"] + Y) < 0\n # Erand, kui asub ülalpool ruudustikku.\n if asub_ülevalpool_ruudustikku or KLOTSID[klots[\"kuju\"]][klots[\"paiknevus\"]][y][x] == TÜHI:\n continue\n if not asub_ruudustikul(x + klots[\"x\"] + X, y + klots[\"y\"] + Y):\n return False\n if ruudustik[x + klots[\"x\"] + X][y + klots[\"y\"] + Y] != TÜHI:\n return False\n return True\n\ndef arvuta_level_ja_kukkumise_sagedus(skoor):\n \"\"\"Vastavalt mängija skoorile arvutatakse level ja klotside kukkumise sagedus ehk\n sekundid, mille jooksul klots ühe ruudu võrra kukub. Iga täis saadud rea eest kasvab\n skoor ühe punkti võrra ja iga +10 punkti tagant kasvab level.\"\"\"\n level = int(skoor / 10) + 1 # Liidame ühe, et esimene lvl oleks ikka 1.\n\n # Võtame, et alguses kukub klots iga 0,3 s tagant ühe ruudu võrra.\n kukkumise_sagedus = 0.3 - (level * 0.02) # Mida kõrgem lvl seda kiiremini kukkuma hakkavad.\n # Kui kukkumise_sagedus peakski negatiivseks minema, ei juhtu midagi, kuna\n # me kontrollime, et aeg, mille jooksul kukkuv klots ühe ruudu võrra viimati kukkus,\n # on suurem, kui kukkumise_sagedus. Mingist hetkest klotside kukkumine lihtsalt\n # enam kiiremaks ei lähe.\n return level, kukkumise_sagedus\n\ndef loo_tühi_ruudustik():\n \"\"\"Loob tühja ruudustiku (list) ja tagastab selle.\"\"\"\n ruudustik = []\n for i in range(RUUDUSTIKU_LAIUS):\n ruudustik.append([TÜHI] * RUUDUSTIKU_KÕRGUS)\n return ruudustik\n\ndef rida_täis(ruudustik, y):\n \"\"\"Tagastab True, kui üks rida sai täis (st reas auke ei ole).\n Parameeter y näitab rida.\"\"\"\n for x in range(RUUDUSTIKU_LAIUS):\n if ruudustik[x][y] == TÜHI:\n return False\n return True\n\ndef eemalda_täis_rida(ruudustik):\n \"\"\"Otsib üles täitunud rea, eemaldab selle ja nihutab kõik ülesjääva ühe ruudurea võrra allapoole.\"\"\"\n eemaldatud_ridu = 0 # skoori arvutamiseks\n y = RUUDUSTIKU_KÕRGUS - 1 # Alustame ruudustiku põhjast.\n while y >= 0:\n if rida_täis(ruudustik, y):\n # Eemaldame selle rea ja liigutame ülejäänu ühe rea võrra allapoole.\n # Selleks kopeerime iga ülesse poole jääva rea ühe võrra alla.\n for liigutame in range(y, 0, -1):\n for x in range(RUUDUSTIKU_LAIUS):\n ruudustik[x][liigutame] = ruudustik[x][liigutame - 1]\n for x in range(RUUDUSTIKU_LAIUS):\n ruudustik[x][0] = TÜHI\n eemaldatud_ridu += 1\n else:\n y -= 1 # Järgmise rea juurde...\n\n return eemaldatud_ridu\n\ndef konverter(ruutx, ruuty):\n \"\"\"Konverteerib ruudu koordinaadid ekraani koordinaatideks (pikslid).\"\"\"\n x = X_VARU + (ruutx * RUUDU_SUURUS)\n y = Y_VARU + (ruuty * RUUDU_SUURUS)\n return x, y\n\ndef kontrolli_katkestust():\n \"\"\"Kui event queue's on \"QUIT\" event, sulgeme Pygame'i ja Pythoni.\"\"\"\n for event in pygame.event.get(QUIT):\n pygame.quit()\n sys.exit()\n\ndef kontrolli_nupuvajutust():\n \"\"\"Tagastab None, kui event queue's pole ühtegi KEYUP event'i.\"\"\"\n kontrolli_katkestust() # Kui kasutaja peaks tahtma väljuda.\n\n for event in pygame.event.get([KEYDOWN, KEYUP]): # Vaatame ainult KEYDOWN ja KEYUP evente.\n if event.type == KEYDOWN:\n return event.key\n else:\n return None\n\ndef loo_tekstiobjekt(tekst, font, värv):\n lõuend = font.render(tekst, True, värv)\n return lõuend, lõuend.get_rect()\n\ndef kuva_tekst(tekst):\n \"\"\"Kuvab ekraani keskele mingi teksti ja 
palub kasutajal mängu alustamiseks mõnd nuppu vajutada.\"\"\"\n teksti_lõuend, teksti_ristkülik = loo_tekstiobjekt(tekst, SUUR_FONT, TEKSTI_VÄRV)\n teksti_ristkülik.center = (int(AKNA_LAIUS / 2), int(AKNA_KÕRGUS / 2))\n LÕUEND.blit(teksti_lõuend, teksti_ristkülik)\n\n # Alustamiseks vajuta nuppu...\n nupuvajutus_lõuend, nupuvajutus_ristkülik = loo_tekstiobjekt(\"Mängu alustamiseks vajuta ükskõik mis nuppu.\", VÄIKE_FONT, TEKSTI_VÄRV)\n nupuvajutus_ristkülik.center = (int(AKNA_LAIUS / 2), int(AKNA_KÕRGUS / 2) + 50)\n LÕUEND.blit(nupuvajutus_lõuend, nupuvajutus_ristkülik)\n\n while kontrolli_nupuvajutust() == None:\n # Tahame, et tekst oleks senikaua ekraanil, kuni mängija mõnd nuppu vajutab.\n pygame.display.update()\n KELL.tick()\n\ndef genereeri_uus_klots():\n \"\"\"Genereerib klotsi ja selle klotsi värvi.\"\"\"\n klotside_kujud = KLOTSID.keys() # sõnastiku võtmed\n kuju = random.choice(list(klotside_kujud)) # Valime mingi suvalise klotsi kuju.\n klotsi_värv = random.randint(0, len(KLOTSIDE_VÄRVID)-1) # Igale värvile vastab mingi number (indeks).\n klotsi_paiknevus = random.randint(0, len(KLOTSID[kuju]) - 1)\n\n # Klotsid defineerime sõnastikena.\n uus_klots = {\"kuju\": kuju, \"x\": (int(RUUDUSTIKU_LAIUS / 2) - int(MALLI_LAIUS / 2)), \"y\": -2, \"värv\": klotsi_värv, \"paiknevus\": klotsi_paiknevus}\n\n return uus_klots\n\ndef lisa_ruudustikku(ruudustik, klots):\n for x in range(MALLI_LAIUS):\n for y in range(MALLI_KÕRGUS):\n if KLOTSID[klots[\"kuju\"]][klots[\"paiknevus\"]][y][x] != TÜHI:\n ruudustik[x + klots[\"x\"]][y + klots[\"y\"]] = klots[\"värv\"]\n\ndef joonista_ruut(ruutx, ruuty, värv, x=None, y=None):\n \"\"\"Joonistab ühe ruudu koordinaatidega (ruutx,ruuty).\"\"\"\n if värv == TÜHI:\n return\n if x == None and y == None:\n x, y = konverter(ruutx, ruuty)\n pygame.draw.rect(LÕUEND, KLOTSIDE_VÄRVID[värv], (x + 1, y + 1, RUUDU_SUURUS - 1, RUUDU_SUURUS - 1))\n\ndef joonista_ruudustik(ruudustik):\n \"\"\"Joonistab ruudustiku, raami selle ümber ja klotsid.\"\"\"\n # äär ruudustiku ümber\n pygame.draw.rect(LÕUEND, RAAMI_VÄRV, (X_VARU - 3, Y_VARU - 7, (RUUDUSTIKU_LAIUS * RUUDU_SUURUS) + 8, (RUUDUSTIKU_KÕRGUS * RUUDU_SUURUS) + 8), 5)\n\n # Värvime ruudustiku tausta.\n pygame.draw.rect(LÕUEND, TAUSTAVÄRV, (X_VARU, Y_VARU, RUUDU_SUURUS * RUUDUSTIKU_LAIUS, RUUDU_SUURUS * RUUDUSTIKU_KÕRGUS))\n\n # ruutude joonistamine\n for x in range(RUUDUSTIKU_LAIUS):\n for y in range(RUUDUSTIKU_KÕRGUS):\n joonista_ruut(x, y, ruudustik[x][y])\n\ndef joonista_klots(klots, x=None, y=None):\n joonistatav_klots = KLOTSID[klots[\"kuju\"]][klots[\"paiknevus\"]]\n if x == None and y == None:\n x, y = konverter(klots[\"x\"], klots[\"y\"])\n\n # Joonistame ruudud, mis moodustavad klotsi.\n for i in range(MALLI_LAIUS):\n for j in range(MALLI_KÕRGUS):\n if joonistatav_klots[j][i] != TÜHI:\n joonista_ruut(None, None, klots[\"värv\"], x + (i * RUUDU_SUURUS), y + (j * RUUDU_SUURUS))\n\ndef joonista_statid(skoor, level):\n \"\"\"Kuvab ekraanile skoori ja leveli.\"\"\"\n skoori_lõuend = VÄIKE_FONT.render(\"Sinu skoor: %s\" % skoor, True, TEKSTI_VÄRV)\n skoori_ristkülik = skoori_lõuend.get_rect()\n skoori_ristkülik.topleft = (AKNA_LAIUS - 180, 20)\n LÕUEND.blit(skoori_lõuend, skoori_ristkülik)\n\n leveli_lõuend = VÄIKE_FONT.render(\"Level: %s\" % level, True, TEKSTI_VÄRV)\n leveli_ristkülik = leveli_lõuend.get_rect()\n leveli_ristkülik.topleft = (AKNA_LAIUS - 180, 50)\n LÕUEND.blit(leveli_lõuend, leveli_ristkülik)\n\ndef joonista_järgmine_klots(klots):\n \"\"\"Joonistab järgmise klotsi ruudustiku kõrvale.\"\"\"\n 
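# Added note on the hard-coded offsets below: with AKNA_LAIUS = 640 they anchor the preview to the right-hand sidebar - the label lands at x = 460 (AKNA_LAIUS - 180) and the preview klots is drawn from x = 490 (AKNA_LAIUS - 150).\n 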
järgmise_klotsi_lõuend = VÄIKE_FONT.render(\"Järgmine klots:\", True, TEKSTI_VÄRV)\n järgmise_klotsi_ristkülik = järgmise_klotsi_lõuend.get_rect()\n järgmise_klotsi_ristkülik.topleft = (AKNA_LAIUS - 180, 90)\n LÕUEND.blit(järgmise_klotsi_lõuend, järgmise_klotsi_ristkülik)\n joonista_klots(klots, x=AKNA_LAIUS-150, y=120)\n\nstart()", "sub_path": "sirtet.py", "file_name": "sirtet.py", "file_ext": "py", "file_size_in_byte": 18114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 161, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 168, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 180, "usage_type": "call"}, {"api_name": "time.time", "line_number": 181, "usage_type": "call"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 197, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 197, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 204, "usage_type": "call"}, {"api_name": "time.time", "line_number": 205, "usage_type": "call"}, {"api_name": "time.time", "line_number": 206, "usage_type": "call"}, {"api_name": "time.time", "line_number": 220, "usage_type": "call"}, {"api_name": "time.time", "line_number": 225, "usage_type": "call"}, {"api_name": "time.time", "line_number": 242, "usage_type": "call"}, {"api_name": "time.time", "line_number": 254, "usage_type": "call"}, {"api_name": "time.time", "line_number": 259, "usage_type": "call"}, {"api_name": "time.time", "line_number": 260, "usage_type": "call"}, {"api_name": "time.time", "line_number": 262, "usage_type": "call"}, {"api_name": "time.time", "line_number": 265, "usage_type": "call"}, {"api_name": "time.time", "line_number": 276, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 286, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 286, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 368, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 368, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 369, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 370, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 376, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 376, 
"usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 399, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 399, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 405, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 406, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 407, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 426, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 426, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 431, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 431, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 434, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 434, "usage_type": "attribute"}]} +{"seq_id": "612337124", "text": "# automated tests for clan_parser.py\n\nfrom nose.tools import *\nfrom mock import patch\nfrom wl_parsers.clan_parser import *\n\n# utility\n\ndef setDataToParser(data):\n parser = ClanParser(0)\n parser.pageData = data\n return parser\n\ndef readFile(filename, mode=\"r\"):\n with open(filename, mode) as fin:\n return fin.read()\n\n# fetch data\nallClans = readFile(\"wl_parsers/test/data/allClans.txt\")\nclan_CORP = readFile(\"wl_parsers/test/data/clan_CORP.txt\")\nclan_league = readFile(\"wl_parsers/test/data/clan_leagueOfNations.txt\")\nclan_nonexistent = readFile(\"wl_parsers/test/data/clan_nonexistent.txt\")\n\n# create parsers\nparser_CORP = setDataToParser(clan_CORP)\nparser_league = setDataToParser(clan_league)\nparser_nonexistent = setDataToParser(clan_nonexistent)\n\n# classless function tests\n\nclass FakeRequest(object):\n\n def __init__(self, data):\n self.text = data\n\nclanPage = FakeRequest(data=allClans)\n\n@patch(\"wl_parsers.clan_parser.requests.get\")\ndef test_getClans(reqGet):\n reqGet.return_value = clanPage\n assert(12 in getClans())\n assert(490490409 not in getClans())\n\n# decorator test\n\nclass DummyClass(object):\n\n def __init__(self, exists):\n self.__exists = exists\n\n @property\n def exists(self):\n return self.__exists\n\n @checkNonexistent\n def dummyFn(self, *args):\n return len(args)\n\ndef test_checkNonexistent():\n goodDummy = DummyClass(True)\n badDummy = DummyClass(False)\n assert_equals(goodDummy.dummyFn(1,2,3), 3)\n assert_equals(goodDummy.dummyFn(), 0)\n assert_equals(badDummy.dummyFn(), None)\n assert_equals(badDummy.dummyFn(1,2,3), None)\n\n# method tests\n\n## constructor test\n@patch('wl_parsers.clan_parser.ClanParser.makeURL')\ndef test_clanParser(makeURL):\n clanID = \"clanID\"\n clanParser = ClanParser(clanID)\n assert_equals(clanParser.ID, clanID)\n assert_equals(clanParser.URL, makeURL.return_value)\n\ndef test_exists():\n assert_true(parser_CORP.exists)\n assert_true(parser_league.exists)\n assert_false(parser_nonexistent.exists)\n\ndef test_name():\n assert_equals(parser_CORP.name, \"CORP\")\n assert_equals(parser_league.name, \"League of Nations\")\n assert_equals(parser_nonexistent.name, None)\n\ndef test_size():\n assert_equals(parser_CORP.size, 86)\n assert_equals(parser_league.size, 23)\n assert_equals(parser_nonexistent.size, None)\n\ndef test_link():\n assert_equals(parser_CORP.link, \n \"https://www.warlight.net/Map/23141-Empire-CORP\")\n assert_equals(parser_league.link, \n \"http://leagueofnationswarlight.weebly.com\")\n assert_equals(parser_nonexistent.link, None)\n\n@patch('wl_parsers.clan_parser.ClanParser.getValueFromBetween')\ndef 
test_link_edgecase(getVal):\n getVal.return_value = \"http://\"\n assert_equals(parser_league.link, \"\")\n\ndef test_tagline():\n assert_equals(parser_CORP.tagline, \n \"Like a phoenix, we'll always reborn from our ashes\")\n assert_equals(parser_league.tagline, \"Many nations, one vision.\")\n assert_equals(parser_nonexistent.tagline, None)\n\ndef test_createdDate():\n assert_equals(parser_CORP.createdDate, \n datetime.date(year=2016, month=6, day=30))\n assert_equals(parser_league.createdDate, \n datetime.date(year=2014, month=2, day=2))\n assert_equals(parser_nonexistent.createdDate, None)\n\ndef test_bio():\n bio_CORP = parser_CORP.bio\n bio_league = parser_league.bio\n assert(\"Harambe\" not in bio_CORP)\n assert(\"Welcome to CORP!\" in bio_CORP)\n assert(\"undergoing serious changes\" in bio_league)\n assert_equals(parser_nonexistent.bio, None)\n\ndef test_members():\n members_CORP = parser_CORP.members\n assert_equals(len(members_CORP), 86)\n assert({'ID': 7849604602, 'name': \"Spooky\", \n 'title': \"4spooky16me\", 'isMember': False} in members_CORP)\n assert({'ID': 7610648981, 'name': \"siirial\", \n 'title': \"keeler\", 'isMember': True} in members_CORP)\n members_league = parser_league.members\n assert_equals(len(members_league), 23)\n assert_equals(parser_nonexistent.members, None)", "sub_path": "wl_parsers/test/clan_parser_tests.py", "file_name": "clan_parser_tests.py", "file_ext": "py", "file_size_in_byte": 4090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mock.patch", "line_number": 38, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 70, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "427503499", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 7 11:19:57 2018\n\n@author: Administrator\n\"\"\"\n\nfrom scipy.special import comb, perm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef Q(k, x):\n s = 0\n for i in range(0, (k+1)//2):\n s1 = x**(i+1) * (1-x)**(k-i)\n s2 = (1-x)**(i+1) * x**(k-i)\n s += comb(k, i) * (s1 + s2)\n return s/x\n\nx = np.arange(0.01, 0.5, 0.01)\nK = np.arange(1, 20, 2)\nfor k in K:\n result = []\n for i in x:\n result.append(Q(k, i))\n plt.plot(x, result, label = str(k))\n plt.legend()\nplt.show()\n\nK = np.arange(1, 30, 2)\nresult = []\nfor k in K:\n a = perm(k, k)\n b = perm((k-1)//2, (k-1)//2)\n result.append(a / (b*b * 2**(k-1)))\nplt.scatter(K, result)\nplt.show()\n\ndef f(k, x):\n s = 0\n for i in range((k+1)//2, k+1):\n s1 = x**(i) * (1-x)**(k-i)\n s += comb(k, i) * s1\n return (1-2*x)*s/x\n\nfor k in K:\n result = []\n for i in x:\n result.append(f(k, i))\n plt.plot(x, result, label = str(k))\n plt.legend()\nplt.show()", "sub_path": "Chapter6/exercise4.py", "file_name": "exercise4.py", "file_ext": "py", "file_size_in_byte": 1026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scipy.special.comb", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.special.perm", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.special.perm", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "scipy.special.comb", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "291184380", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('cart/', views.cart, name=\"cart\"),\n path('checkout/', views.checkout, name=\"checkout\"),\n path('about/', views.about, name=\"about\"),\n path('update_item/', views.updateItem, name=\"update_item\"),\n path('products/cpu/', views.cpu, name=\"cpu\"),\n path('products/gpu/', views.gpu, name=\"gpu\"),\n path('products/motherboard/', views.mobo, name=\"mobo\"),\n path('products/memory/', views.memory, name=\"memory\"),\n path('products/case/', views.case, name=\"case\"),\n path('products/storage/', views.storage, name=\"storage\"),\n path('products/psu/', views.psu, name=\"psu\"),\n path('products/cooler/', views.cooler, name=\"cooler\"),\n path('products/caseaccessories/', views.caseacc, name=\"caseacc\"),\n path('products/headset/', views.headset, name=\"headset\"),\n path('products/keyboard/', views.keyboard, name=\"keyboard\"),\n path('products/monitor/', views.monitor, name=\"monitor\"),\n path('products/mouse/', views.mouse, name=\"mouse\"),\n path('products/<int:id>/', views.product, name='product'),\n path('search/', views.search, name='search'),\n path('login/', views.loginPage, name='login'),\n path('logout/', views.logUserOut, name='logout'),\n path('register/', views.registerPage, name='register'),\n path('delivery_process/', views.delivery_process, name='delivery_process'),\n path('purchase_summary/', views.purchase_summary, name='purchase_summary'),\n path('payment_confirmation/', views.payment_confirmation, name='payment_confirmation'),\n]\n", "sub_path": "items/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": 
"django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "459515690", "text": "import time\nimport datetime\nimport logger\nimport tweepy\n\nfrom handler import tw_handler\nfrom services import utils\nfrom agents_platform import settings\n\n\n# twitter_subscriptions = settings.Storage.twitter_subscriptions\n\n\nclass TwitterStream(tweepy.StreamListener):\n\n def __init__(self):\n super(TwitterStream, self).__init__()\n self.auth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)\n self.auth.set_access_token(settings.ACCESS_TOKEN, settings.ACCESS_TOKEN_SECRET)\n\n self.api = tweepy.API(self.auth)\n self.last_time = datetime.datetime.now()\n self.data = []\n\n # def on_data(self, raw_data):\n # state = super(TwitterStream, self).on_data(raw_data)\n # if not state:\n # return state\n # re\n\n def on_status(self, tweet):\n self.handle_result(tweet)\n return True\n\n @utils.postpone\n def handle_result(self, tweet):\n twitter_subscriptions = utils.read_data('tw_subscriptions.json')\n for keyword in twitter_subscriptions:\n print(keyword)\n if keyword.lower() in tweet.text.lower():\n # if datetime.datetime.now() - self.last_time <= datetime.timedelta(seconds=1):\n # self.data.append(tweet)\n # print(\"not yet time \" + tweet.text)\n # return\n self.last_time = datetime.datetime.now()\n #call interface function with tweet and twitter_subscriptions[keyword]\n tw_handler(self.data, board=twitter_subscriptions[keyword])\n for tweet in self.data[:15]:\n print(tweet.text)\n self.data = []\n\n def on_error(self, status_code):\n if status_code == 420:\n return False\n\n\nclass TwitterAPI(object):\n RECENT, POPULAR, MIXED = 'recent', 'popular', 'mixed'\n\n def __init__(self):\n self.auth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)\n self.auth.set_access_token(settings.ACCESS_TOKEN, settings.ACCESS_TOKEN_SECRET)\n\n self.api = tweepy.API(self.auth)\n\n def fetch_trends(self, woeid=1, exclude=\"\"):\n result = self.api.trends_place(id=woeid, exclude=exclude)\n return result[0]\n\n def search(self, query, max_tweets, count=100, lang='en', 
show_user=False, **kwargs):\n if 'retweet' not in kwargs or not kwargs['retweet']:\n query += \" -filter:retweets\"\n result_type = kwargs[\"result_type\"] if \"result_type\" in kwargs else self.RECENT\n tweet_count = 0\n max_id = -1\n since_id = None\n searched_tweets = [] # collects results across all pages\n while tweet_count < max_tweets:\n try:\n if max_id <= 0:\n if not since_id:\n new_tweets = self.api.search(q=query, count=count, lang=lang,\n show_user=show_user,\n result_type=result_type)\n\n else:\n new_tweets = self.api.search(q=query, count=count, lang=lang,\n since_id=since_id,\n show_user=show_user,\n result_type=result_type)\n else:\n if not since_id:\n new_tweets = self.api.search(q=query, count=count,\n max_id=str(max_id-1),\n show_user=show_user,\n result_type=result_type)\n else:\n new_tweets = self.api.search(q=query, count=count,\n max_id=str(max_id - 1),\n since_id=since_id,\n show_user=show_user,\n result_type=result_type)\n if not new_tweets:\n logger.info(__name__, \"No new tweets found\")\n break\n # since_id = new_tweets[-1].id\n max_id = new_tweets[-1].id\n tweet_count += len(new_tweets)\n if max_tweets - count <= tweet_count < max_tweets:\n result_type = self.POPULAR\n searched_tweets.extend(new_tweets) # keep paging instead of returning only the first batch\n except tweepy.RateLimitError:\n logger.warning(__name__, \"Rate limit error\")\n time.sleep(15 * 60)\n except tweepy.TweepError as e:\n logger.error(__name__, \"TweepError: {}\".format(e))\n break\n return searched_tweets\n\n\n", "sub_path": "agents_platform/services/twitter.py", "file_name": "twitter.py", "file_ext": "py", "file_size_in_byte": 4759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tweepy.StreamListener", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tweepy.OAuthHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "agents_platform.settings.CONSUMER_KEY", "line_number": 18, "usage_type": "attribute"}, {"api_name": "agents_platform.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "agents_platform.settings.CONSUMER_SECRET", "line_number": 18, "usage_type": "attribute"}, {"api_name": "agents_platform.settings.ACCESS_TOKEN", "line_number": 19, "usage_type": "attribute"}, {"api_name": "agents_platform.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "agents_platform.settings.ACCESS_TOKEN_SECRET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "services.utils.read_data", "line_number": 37, "usage_type": "call"}, {"api_name": "services.utils", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "handler.tw_handler", "line_number": 47, "usage_type": "call"}, {"api_name": "services.utils.postpone", "line_number": 35, "usage_type": "attribute"}, {"api_name": "services.utils", "line_number": 35, "usage_type": "name"}, {"api_name": "tweepy.OAuthHandler", "line_number": 61, "usage_type": "call"}, {"api_name": "agents_platform.settings.CONSUMER_KEY", "line_number": 61, "usage_type": "attribute"}, {"api_name": "agents_platform.settings", "line_number": 61, "usage_type": "name"}, {"api_name": "agents_platform.settings.CONSUMER_SECRET", "line_number": 61, "usage_type": "attribute"}, {"api_name": 
"agents_platform.settings.ACCESS_TOKEN", "line_number": 62, "usage_type": "attribute"}, {"api_name": "agents_platform.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "agents_platform.settings.ACCESS_TOKEN_SECRET", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 64, "usage_type": "call"}, {"api_name": "logger.info", "line_number": 103, "usage_type": "call"}, {"api_name": "tweepy.RateLimitError", "line_number": 111, "usage_type": "attribute"}, {"api_name": "logger.warning", "line_number": 112, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 113, "usage_type": "call"}, {"api_name": "tweepy.TweepError", "line_number": 114, "usage_type": "attribute"}, {"api_name": "logger.error", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "404975113", "text": "# Copyright (c) 2013, System Engineering Software Society\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the System Engineering Software Society nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED.\n# IN NO EVENT SHALL SYSTEM ENGINEERING SOFTWARE SOCIETY BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nWith the considered node it is possible to convert the data types of a number\nof selected columns in the incoming Table. In general, the columns in the\ninternal :ref:`Table` type can have the same data types that exist for numpy\narrays, except for numpy object type. For this node the list of available data\ntypes to convert to is restricted.\n\nThe following data types are available for conversion:\n - bool\n - float\n - int\n - str\n - unicode\n - datetime\n\n\nConverting strings to datetimes\n-------------------------------\nConverting a str/unicode column to datetime might require some extra thought if\nthe strings include time-zone information. The datetimes stored by Sympathy\nhave no time zone information (due to limitations in the underlying data\nlibraries), but Sympathy is able to use the time-zone information when creating\nthe datetime columns. This can be done in two different ways, which we call\n\"UTC\" and \"naive\".\n\ndatetime (UTC)\n##############\nThe option *datetime (UTC)* will calculate the UTC-time corresponding to each\ndatetime in the input column. 
This is especially useful when your data contains\ndatetimes from different time zones (a common reason for this is daylight\nsavings time), but when looking in the viewer, exports etc. the datetimes will\nnot be the same as in the input.\n\nFor example the string ``'2016-01-01T12:00:00+0100'`` will be stored as\n``2016-01-01T11:00:00`` which is the corresponding UTC time.\n\nThere is currently no standard way of converting these UTC datetimes back to\nthe localized datetime strings with time-zone information.\n\ndatetime (naive)\n################\nThe option *datetime (naive)* simply discards any time-zone information. This\ncorresponds pretty well to how we \"naively\" think of time when looking at a\nclock on the wall.\n\nFor example the string ``'2016-01-01T12:00:00+0100'`` will be stored as\n``2016-01-01T12:00:00``.\n\"\"\"\nimport pytz\nimport dateutil.parser\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom sympathy.api import qt as qt_compat\nQtCore = qt_compat.QtCore # noqa\nQtGui = qt_compat.import_module('QtGui') # noqa\nfrom sympathy.api import node as synode\nfrom sympathy.api.nodeconfig import Port, Ports, Tag, Tags\nfrom sympathy.api import exceptions\nfrom sympathy.api import table\n\n\ndef _matplotlib_dates():\n from matplotlib import dates as _mpl_dates\n return _mpl_dates\n\n\ndef _str_to_datetime_utc(x, replace=False):\n try:\n dt = dateutil.parser.parse(x)\n except ValueError:\n raise exceptions.SyDataError(\n '\"{}\" is not a supported time format.'.format(x))\n\n if dt.tzinfo is None:\n return np.datetime64(pytz.UTC.localize(dt))\n\n if replace:\n return np.datetime64(dt.replace(tzinfo=pytz.UTC))\n\n return np.datetime64(dt)\n\n\ndef _str_to_datetime_naive(x):\n return _str_to_datetime_utc(x, replace=True)\n\n\ndef to_string(column):\n return np.vectorize(str)(column)\n\n\ndef to_unicode(column):\n return np.vectorize(unicode)(column)\n\n\ndef to_datetime_common(column):\n if column.dtype.kind == 'M':\n return column\n elif column.dtype.kind == 'f':\n return np.array([_matplotlib_dates().num2date(x)\n for x in column.tolist()],\n dtype='datetime64[us]')\n else:\n return None\n\n\ndef to_datetime_utc(column):\n result = to_datetime_common(column)\n if result is not None:\n return result\n return np.vectorize(_str_to_datetime_utc)(column).astype('datetime64[us]')\n\n\ndef to_datetime_naive(column):\n result = to_datetime_common(column)\n if result is not None:\n return result\n return np.vectorize(_str_to_datetime_naive)(column).astype(\n 'datetime64[us]')\n\n\ndef to_int(column):\n return column.astype(np.int)\n\n\ndef atof(x):\n return np.float(x.replace(',', '.'))\n\n\ndef to_float(column):\n if column.dtype.kind == 'M':\n return np.array([_matplotlib_dates().date2num(x)\n for x in column.tolist()])\n\n try:\n return column.astype(np.float)\n except ValueError:\n converted_array = np.vectorize(atof)(column)\n return converted_array\n\n\ndef to_bool(column):\n return np.greater_equal(column, 1).astype(np.bool)\n\n\nTYPE_NAMES = {'b': 'bool',\n 'f': 'float',\n 'i': 'int',\n 'S': 'str',\n 'U': 'unicode',\n 'Mu': 'datetime (UTC)',\n 'Mn': 'datetime (naive)'}\n\nCONVERSION_OLD_TYPES_NEW_TYPES = {\n 'bool': 'b',\n 'float': 'f',\n 'int': 'i',\n 'str': 'S',\n 'unicode': 'U',\n 'datetime': 'Mu'}\n\nCONVERSIONS = {'b': defaultdict(lambda: to_bool),\n 'f': defaultdict(lambda: to_float),\n 'i': defaultdict(lambda: to_int),\n 'S': defaultdict(lambda: to_string),\n 'U': defaultdict(lambda: to_unicode),\n 'Mu': defaultdict(lambda: to_datetime_utc),\n 'Mn': 
defaultdict(lambda: to_datetime_naive)}\n\n\n# def extract(column, dtype):\n# return (column, (dtype, CONVERSIONS))\n\n\ndef convert_table_base(input_table, output_table, conversion):\n \"\"\"\n Convert table using convert_table with CONVERSIONS as only column\n conversion dictionary.\n\n Add data from input_table to output_table converting it according to\n conversion.\n \"\"\"\n conversion_base = dict(((k, (v, CONVERSIONS))\n for k, v in conversion.items()))\n return convert_table(input_table, output_table, conversion_base)\n\n\ndef convert_table(input_table, output_table, conversion, keep_other=True):\n \"\"\"\n Add data from input_table to output_table converting it according to\n conversion.\n\n >>> input_table = table.File()\n >>> output_table = table.File()\n >>> input_table.set_column_from_array('col1', np.array([1.1]))\n >>> input_table.set_column_from_array('col2', np.array([1]))\n >>> input_table.set_column_from_array('col3', np.array(['hi']))\n >>> conversion = {'col1': ('i', CONVERSIONS), 'col2': ('b', CONVERSIONS)}\n >>> convert_table(input_table, output_table, conversion)\n >>> print str(input_table)\n col1 float64\n col2 int64\n col3 |S2\n >>> '{0:0.1f}'.format(output_table.get_column_to_array('col1')[0])\n '1.1'\n >>> output_table.get_column_to_array('col2')[0]\n True\n >>> output_table.get_column_to_array('col3')[0]\n 'hi'\n \"\"\"\n columns = input_table.column_names()\n converted_columns = conversion.keys()\n\n for column in columns:\n if column in converted_columns:\n # Convert column\n output_table.set_column_from_array(column, convert_column(\n input_table.get_column_to_array(column), conversion[column]))\n elif keep_other:\n # Copy column\n output_table.update_column(column, input_table)\n\n output_table.set_attributes(input_table.get_attributes())\n output_table.set_name(input_table.get_name())\n\n\ndef convert_column(column, conversion):\n \"\"\"\n Convert column with conversion.\n Return converted column.\n \"\"\"\n target, convert = conversion\n origin = column.dtype.kind\n return convert[target][origin](column)\n\n\nclass ConvertTableColumns(synode.Node):\n \"\"\"\n Convert selected columns in Table to new specified data types.\n\n :Inputs:\n **port1** : Table\n Table with data.\n :Outputs:\n **port2** : Table\n Table with converted columns.\n :Configuration:\n **Select columns** :\n Select column to convert.\n **Select type** :\n Select type to convert selected column to.\n **Add** : button\n Add selected combination of type and columns to the Conversions\n window.\n **Remove** : button\n Remove selected item in Conversions window.\n **Preview** : button\n Test listed conversions in the Conversions window.\n **Conversions** :\n Visualise definded conversions to perform when node is executed.\n \"\"\"\n\n author = \"Erik der Hagopian <erik.hagopian@sysess.org>\"\n copyright = \"(c) 2013 System Engineering Software Society\"\n version = '1.0'\n\n name = 'Convert columns in Table'\n description = 'Convert selected columns in Table to new data types.'\n nodeid = 'org.sysess.sympathy.data.table.converttablecolumns'\n icon = 'select_table_columns.svg'\n tags = Tags(Tag.DataProcessing.TransformData)\n\n inputs = Ports([Port.Table(\n 'Input Table', name='port1', requiresdata=True)])\n outputs = Ports([Port.Table('Table with converted columns', name='port2')])\n\n parameters = synode.parameters()\n editor = synode.Util.selectionlist_editor('multi').value()\n editor['buttons'] = True\n editor['invertbutton'] = True\n parameters.set_list(\n 'in_column_list', 
label='Select columns',\n description='Select the columns to use', value=[],\n editor=editor)\n parameters.set_list(\n 'in_type_list', label='Select type',\n description='Select the type to use', value=[],\n editor=synode.Util.selectionlist_editor('single').value())\n parameters.set_list(\n 'out_column_list', label='Convert columns',\n description='Selected columns to convert', value=[],\n editor=synode.Util.selectionlist_editor('multi').value())\n parameters.set_list(\n 'out_type_list', label='Convert types',\n description='Selected types to use', value=[],\n editor=synode.Util.selectionlist_editor('multi').value())\n\n def update_parameters(self, old_params):\n for i, v in enumerate(old_params['out_type_list'].value):\n if v in CONVERSION_OLD_TYPES_NEW_TYPES:\n old_params['out_type_list'].value[i] = (\n CONVERSION_OLD_TYPES_NEW_TYPES[v])\n\n def exec_parameter_view(self, node_context):\n input_table = node_context.input['port1']\n if not input_table.is_valid():\n input_table = table.File()\n\n return ConvertTableColumnsWidget(\n input_table, node_context.parameters)\n\n def execute(self, node_context):\n self.run(node_context.parameters, node_context.input['port1'],\n node_context.output['port2'], True)\n\n def run(self, parameters, input_table, output_table, keep_other):\n columns = parameters['out_column_list'].value\n types = parameters['out_type_list'].value\n conversion = dict([(column, (dtype, CONVERSIONS))\n for column, dtype in\n zip(columns, types)])\n convert_table(input_table, output_table, conversion, keep_other)\n\n\nclass ConvertTablesColumns(ConvertTableColumns):\n name = 'Convert columns in Tables'\n description = 'Convert selected columns in Tables to new data types.'\n nodeid = 'org.sysess.sympathy.data.table.converttablescolumns'\n\n inputs = Ports([Port.Tables(\n 'Input Table', name='port1', requiresdata=True)])\n outputs = Ports([Port.Tables(\n 'Tables with converted columns', name='port2')])\n\n def exec_parameter_view(self, node_context):\n input_tables = node_context.input['port1']\n if not input_tables.is_valid():\n input_table = table.File()\n else:\n try:\n input_table = input_tables[0]\n except IndexError:\n input_table = table.File()\n\n return ConvertTableColumnsWidget(\n input_table, node_context.parameters)\n\n def execute(self, node_context):\n input_tables = node_context.input['port1']\n output_tables = node_context.output['port2']\n for input_table in input_tables:\n output_table = table.File()\n self.run(node_context.parameters, input_table, output_table, True)\n output_tables.append(output_table)\n\n\nclass ConvertTableColumnsWidget(QtGui.QWidget):\n def __init__(self, input_table, parameters, parent=None):\n super(ConvertTableColumnsWidget, self).__init__(parent)\n self._parameters = parameters\n self._input_table = input_table\n self._init_parameters()\n self._init_gui()\n self._connect_gui()\n\n def _init_parameters(self):\n self._convert_items = {}\n self._parameters['in_column_list'].value_names = []\n self._parameters['in_column_list'].value = []\n self._parameters['in_column_list'].list = (\n self._input_table.column_names())\n self._parameters['in_type_list'].list = TYPE_NAMES.values()\n self._parameters['in_type_list'].value_names = []\n self._parameters['in_type_list'].value = []\n\n def _init_gui(self):\n vlayout = QtGui.QVBoxLayout()\n selection_hlayout = QtGui.QHBoxLayout()\n button_hlayout = QtGui.QHBoxLayout()\n\n self.add_button = QtGui.QPushButton('Add')\n self.remove_button = QtGui.QPushButton('Remove')\n self.preview_button = 
QtGui.QPushButton('Preview')\n\n self.type_list = self._parameters['in_type_list'].gui()\n self.column_list = self._parameters['in_column_list'].gui()\n self.convert_list = QtGui.QListWidget()\n\n self.convert_label = QtGui.QLabel('Conversions')\n self.preview_label = QtGui.QLabel('Not previewed')\n\n self.convert_list.setSelectionMode(\n QtGui.QAbstractItemView.ExtendedSelection)\n\n self.convert_list.setAlternatingRowColors(True)\n\n for column, dtype in zip(self._parameters['out_column_list'].value,\n self._parameters['out_type_list'].value):\n label = u'{dtype} / {column}'.format(\n column=column, dtype=TYPE_NAMES[dtype])\n item = QtGui.QListWidgetItem(label)\n self.convert_list.addItem(item)\n self._convert_items[column] = item\n\n selection_hlayout.addWidget(self.column_list)\n selection_hlayout.addWidget(self.type_list)\n\n button_hlayout.addWidget(self.add_button)\n button_hlayout.addWidget(self.remove_button)\n button_hlayout.addWidget(self.preview_button)\n button_hlayout.addWidget(self.preview_label)\n\n vlayout.addLayout(selection_hlayout)\n vlayout.addLayout(button_hlayout)\n vlayout.addWidget(self.convert_label)\n vlayout.addWidget(self.convert_list)\n\n self.setLayout(vlayout)\n\n def _connect_gui(self):\n self.add_button.clicked.connect(self.add)\n self.remove_button.clicked.connect(self.remove)\n self.preview_button.clicked.connect(self.preview)\n\n def add(self):\n columns = self._parameters['in_column_list'].value_names\n type_name = self._parameters['in_type_list'].selected\n dtype = None\n for k, v in TYPE_NAMES.items():\n if v == type_name:\n dtype = k\n break\n if dtype is None:\n return\n\n for column in columns:\n label = u'{dtype} / {column}'.format(\n column=column, dtype=type_name)\n\n if column in self._convert_items:\n item = self._convert_items[column]\n index = self.convert_list.row(item)\n self._parameters['out_column_list'].value[index] = column\n self._parameters['out_type_list'].value[index] = dtype\n item.setText(label)\n else:\n item = QtGui.QListWidgetItem(label)\n self._convert_items[column] = item\n self._parameters['out_column_list'].value.append(column)\n self._parameters['out_type_list'].value.append(dtype)\n self.convert_list.addItem(item)\n\n def remove(self):\n for item in self.convert_list.selectedItems():\n index = self.convert_list.row(item)\n column = self._parameters['out_column_list'].value[index]\n del self._convert_items[column]\n del self._parameters['out_column_list'].value[index]\n del self._parameters['out_type_list'].value[index]\n self.convert_list.takeItem(index)\n\n def preview(self):\n input_table = self._input_table\n output_table = table.File()\n node = ConvertTableColumns()\n try:\n node.run(\n self._parameters, input_table, output_table, False)\n self.preview_label.setText('Ok!')\n self.preview_label.setStyleSheet('QLabel { color : black; }')\n except:\n self.preview_label.setText('Failed.')\n self.preview_label.setStyleSheet('QLabel { color : red; }')\n", "sub_path": "Library/Library/sympathy/data/table/node_convert_table_columns.py", "file_name": "node_convert_table_columns.py", "file_ext": "py", "file_size_in_byte": 18104, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sympathy.api.qt.QtCore", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sympathy.api.qt", "line_number": 81, "usage_type": "name"}, {"api_name": "sympathy.api.qt.import_module", "line_number": 82, "usage_type": "call"}, {"api_name": "sympathy.api.qt", "line_number": 
82, "usage_type": "name"}, {"api_name": "matplotlib.dates", "line_number": 91, "usage_type": "name"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 96, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 96, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 96, "usage_type": "name"}, {"api_name": "sympathy.api.exceptions.SyDataError", "line_number": 98, "usage_type": "call"}, {"api_name": "sympathy.api.exceptions", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.datetime64", "line_number": 102, "usage_type": "call"}, {"api_name": "pytz.UTC.localize", "line_number": 102, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.datetime64", "line_number": 105, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.datetime64", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.vectorize", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 169, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 188, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 189, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 190, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 191, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 192, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 193, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 194, "usage_type": "call"}, {"api_name": "sympathy.api.node.Node", "line_number": 263, "usage_type": "attribute"}, {"api_name": "sympathy.api.node", "line_number": 263, "usage_type": "name"}, {"api_name": "sympathy.api.nodeconfig.Tags", "line_number": 297, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Tag.DataProcessing", "line_number": 297, "usage_type": "attribute"}, {"api_name": "sympathy.api.nodeconfig.Tag", "line_number": 297, "usage_type": "name"}, {"api_name": "sympathy.api.nodeconfig.Ports", "line_number": 299, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port.Table", "line_number": 299, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port", "line_number": 299, "usage_type": "name"}, {"api_name": "sympathy.api.nodeconfig.Ports", "line_number": 301, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port.Table", "line_number": 301, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port", "line_number": 301, "usage_type": "name"}, {"api_name": "sympathy.api.node.parameters", "line_number": 303, "usage_type": "call"}, {"api_name": 
"sympathy.api.node", "line_number": 303, "usage_type": "name"}, {"api_name": "sympathy.api.node.Util.selectionlist_editor", "line_number": 304, "usage_type": "call"}, {"api_name": "sympathy.api.node.Util", "line_number": 304, "usage_type": "attribute"}, {"api_name": "sympathy.api.node", "line_number": 304, "usage_type": "name"}, {"api_name": "sympathy.api.node.Util.selectionlist_editor", "line_number": 314, "usage_type": "call"}, {"api_name": "sympathy.api.node.Util", "line_number": 314, "usage_type": "attribute"}, {"api_name": "sympathy.api.node", "line_number": 314, "usage_type": "name"}, {"api_name": "sympathy.api.node.Util.selectionlist_editor", "line_number": 318, "usage_type": "call"}, {"api_name": "sympathy.api.node.Util", "line_number": 318, "usage_type": "attribute"}, {"api_name": "sympathy.api.node", "line_number": 318, "usage_type": "name"}, {"api_name": "sympathy.api.node.Util.selectionlist_editor", "line_number": 322, "usage_type": "call"}, {"api_name": "sympathy.api.node.Util", "line_number": 322, "usage_type": "attribute"}, {"api_name": "sympathy.api.node", "line_number": 322, "usage_type": "name"}, {"api_name": "sympathy.api.table.File", "line_number": 333, "usage_type": "call"}, {"api_name": "sympathy.api.table", "line_number": 333, "usage_type": "name"}, {"api_name": "sympathy.api.nodeconfig.Ports", "line_number": 356, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port.Tables", "line_number": 356, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port", "line_number": 356, "usage_type": "name"}, {"api_name": "sympathy.api.nodeconfig.Ports", "line_number": 358, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port.Tables", "line_number": 358, "usage_type": "call"}, {"api_name": "sympathy.api.nodeconfig.Port", "line_number": 358, "usage_type": "name"}, {"api_name": "sympathy.api.table.File", "line_number": 364, "usage_type": "call"}, {"api_name": "sympathy.api.table", "line_number": 364, "usage_type": "name"}, {"api_name": "sympathy.api.table.File", "line_number": 369, "usage_type": "call"}, {"api_name": "sympathy.api.table", "line_number": 369, "usage_type": "name"}, {"api_name": "sympathy.api.table.File", "line_number": 378, "usage_type": "call"}, {"api_name": "sympathy.api.table", "line_number": 378, "usage_type": "name"}, {"api_name": "sympathy.api.table.File", "line_number": 490, "usage_type": "call"}, {"api_name": "sympathy.api.table", "line_number": 490, "usage_type": "name"}]} +{"seq_id": "181747900", "text": "import torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\n\nimport collections\nfrom collections import OrderedDict\n\ndef init_dataloaders(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n train_transforms = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean = [0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225])\n ])\n test_transforms = transforms.Compose([\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean = [0.485, 0.456, 0.406], \n std = [0.229, 0.224, 0.225])\n ])\n\n # TODO: Load the datasets with ImageFolder\n train_data = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_data = datasets.ImageFolder(valid_dir, transform=test_transforms)\n test_data = datasets.ImageFolder(test_dir, 
transform=test_transforms)\n\n    # TODO: Using the image datasets and the transforms, define the dataloaders\n    train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)\n    valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=64, shuffle=True)\n    test_loader = torch.utils.data.DataLoader(test_data)\n\n    return train_loader, valid_loader, test_loader\n\ndef get_model(arch):\n    if arch == 'vgg16':\n        model = models.vgg16(pretrained=True)\n        fc_size = 25088\n    elif arch == 'alexnet':\n        model = models.alexnet(pretrained=True)\n        fc_size = 9216\n    elif arch == 'densenet':\n        model = models.densenet121(pretrained=True)\n        fc_size = 1024\n    # Freeze the parameters before returning the model\n    for param in model.parameters():\n        param.requires_grad = False \n    return model, fc_size\n\ndef get_classifier(input_size, hidden_units):\n    # Define the new classifier\n    # Input size has to be passed in to match pre-trained network\n    # First hidden layer is configurable\n    # Remember to include Dropout probability\n    classifier = nn.Sequential(OrderedDict([\n        ('fc1', nn.Linear(input_size, hidden_units)),\n        ('relu1', nn.ReLU()),\n        ('drpout1', nn.Dropout(p=0.3)),\n        ('fc2', nn.Linear(hidden_units, 1024)),\n        ('relu2', nn.ReLU()),\n        ('drpout2', nn.Dropout(p=0.3)), \n        ('logits', nn.Linear(1024, 102)),\n        ('output', nn.LogSoftmax(dim=1))\n    ]))\n    return classifier\n\n# define a test function to use for validation and/or test\ndef test_model(model, dataloader, criterion, key, device):\n    accuracy = 0\n    test_loss = 0\n    for images, labels in dataloader:\n        model.to(device)\n        images, labels = images.to(device), labels.to(device)\n        outputs = model.forward(images)\n        test_loss += criterion(outputs, labels).item()\n        # probabilities are the inverse log function of the output\n        ps = torch.exp(outputs)\n        equality = (labels.data == ps.max(dim=1)[1])\n        accuracy += equality.type(torch.FloatTensor).mean()\n    \n    return test_loss, accuracy\n\ndef train_model(model, data_loader, valid_loader, learnrate, device, epochs):\n    # Set a loss criterion and \n    # set optimizer to only optimize a classifier\n    criterion = nn.NLLLoss()\n    optimizer = optim.Adam(model.classifier.parameters(), lr=learnrate)\n    \n    print_every = 20\n    model.to(device)\n    \n    for e in range(epochs):\n        running_loss = 0\n        model.train()\n        steps = 0\n        if True:  # was 'with active_session():' -- an undefined keep-alive helper; a no-op block keeps the loop runnable\n            for (images, labels) in data_loader:\n                steps += 1\n                images, labels = images.to(device), labels.to(device)\n                optimizer.zero_grad()\n                outputs = model.forward(images)\n                loss = criterion(outputs, labels)\n                loss.backward()\n                optimizer.step()\n                running_loss += loss.item()\n                \n                if (steps) % print_every == 0:\n                    model.eval()\n                    with torch.no_grad():\n                        test_loss, accuracy = test_model(model, valid_loader, criterion, "Valid", device)\n                    print("Epoch: {}/{}...".format(e+1,epochs),\n                          "Training Loss: {:.3f}".format(running_loss/print_every),\n                          "Validation Loss: {:.3f}".format(test_loss/len(valid_loader)),\n                          "Validation Accuracy: {:.3f}".format(accuracy/len(valid_loader)))\n                    running_loss = 0\n                    model.train()\n    return\n\ndef save_checkpoint(model, filename):\n    # TODO: Save the checkpoint \n    checkpoint = {\n        'model':model,\n        'state_dict':model.state_dict(),\n    }\n    torch.save(checkpoint, filename)\n    return\n\ndef load_checkpoint(filepath):\n    checkpoint = torch.load(filepath)\n    model = checkpoint['model']\n    model.load_state_dict(checkpoint['state_dict'])\n    return model\n    ", "sub_path": "nnetwork.py", "file_name": "nnetwork.py", "file_ext": "py", "file_size_in_byte": 5123, "program_lang": "python", 
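One way to check the freeze-then-replace pattern used by get_model() and get_classifier() above: after freezing, only parameters of the newly attached head should still require gradients. A short sketch; pretrained=False is used solely to avoid the weight download, the gradient bookkeeping is identical:

import torch.nn as nn
from torchvision import models

model = models.vgg16(pretrained=False)
for param in model.parameters():
    param.requires_grad = False  # freeze the pretrained feature extractor

# Attach a fresh head; newly created modules default to requires_grad=True.
model.classifier = nn.Sequential(
    nn.Linear(25088, 512), nn.ReLU(), nn.Dropout(p=0.3),
    nn.Linear(512, 102), nn.LogSoftmax(dim=1))

trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # only classifier.* parameters should be listed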
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 15, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 17, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 32, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torchvision.models.vgg16", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.models.alexnet", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.models.densenet121", "line_number": 50, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 62, "usage_type": "call"}, {"api_name": 
"torch.nn.Linear", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.exp", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.nn.NLLLoss", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "21210900", "text": "# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n\n\nfrom typing import Any, Callable, Dict, Optional, Union\n\nfrom sbi.inference.posteriors.base_posterior import NeuralPosterior\nfrom sbi.inference.snle.snle_base import LikelihoodEstimator\nfrom sbi.types import TensorboardSummaryWriter\nfrom sbi.utils import del_entries\n\n\nclass SNLE_A(LikelihoodEstimator):\n def __init__(\n self,\n simulator: Callable,\n prior,\n num_workers: int = 1,\n simulation_batch_size: int = 1,\n density_estimator: Union[str, Callable] = \"maf\",\n mcmc_method: str = \"slice_np\",\n mcmc_parameters: Optional[Dict[str, Any]] = None,\n device: str = \"cpu\",\n logging_level: Union[int, str] = \"WARNING\",\n summary_writer: Optional[TensorboardSummaryWriter] = None,\n show_progress_bars: bool = True,\n show_round_summary: bool = False,\n ):\n r\"\"\"Sequential Neural Likelihood [1].\n\n [1] Sequential Neural Likelihood: Fast Likelihood-free Inference with\n Autoregressive Flows_, Papamakarios et al., AISTATS 2019,\n https://arxiv.org/abs/1805.07226\n\n Args:\n simulator: A function that takes parameters $\\theta$ and maps them to\n simulations, or observations, `x`, $\\text{sim}(\\theta)\\to x$. Any\n regular Python callable (i.e. function or class with `__call__` method)\n can be used.\n prior: A probability distribution that expresses prior knowledge about the\n parameters, e.g. which ranges are meaningful for them. 
Any\n object with `.log_prob()`and `.sample()` (for example, a PyTorch\n distribution) can be used.\n num_workers: Number of parallel workers to use for simulations.\n simulation_batch_size: Number of parameter sets that the simulator\n maps to data x at once. If None, we simulate all parameter sets at the\n same time. If >= 1, the simulator has to process data of shape\n (simulation_batch_size, parameter_dimension).\n density_estimator: If it is a string, use a pre-configured network of the\n provided type (one of nsf, maf, mdn, made). Alternatively, a function\n that builds a custom neural network can be provided. The function will\n be called with the first batch of simulations (theta, x), which can\n thus be used for shape inference and potentially for z-scoring. It\n needs to return a PyTorch `nn.Module` implementing the density\n estimator. The density estimator needs to provide the methods\n `.log_prob` and `.sample()`.\n mcmc_method: Method used for MCMC sampling, one of `slice_np`, `slice`, `hmc`, `nuts`.\n Currently defaults to `slice_np` for a custom numpy implementation of\n slice sampling; select `hmc`, `nuts` or `slice` for Pyro-based sampling.\n mcmc_parameters: Dictionary overriding the default parameters for MCMC.\n The following parameters are supported: `thin` to set the thinning\n factor for the chain, `warmup_steps` to set the initial number of\n samples to discard, `num_chains` for the number of chains, `init_strategy`\n for the initialisation strategy for chains; `prior` will draw init\n locations from prior, whereas `sir` will use Sequential-Importance-\n Resampling using `init_strategy_num_candidates` to find init\n locations.\n device: torch device on which to compute, e.g. gpu, cpu.\n logging_level: Minimum severity of messages to log. One of the strings\n INFO, WARNING, DEBUG, ERROR and CRITICAL.\n summary_writer: A tensorboard `SummaryWriter` to control, among others, log\n file location (default is `<current working directory>/logs`.)\n show_progress_bars: Whether to show a progressbar during simulation and\n sampling.\n show_round_summary: Whether to show the validation loss and leakage after\n each round.\n \"\"\"\n\n kwargs = del_entries(locals(), entries=(\"self\", \"__class__\"))\n super().__init__(**kwargs)\n\n def __call__(\n self,\n num_simulations: int,\n proposal: Optional[Any] = None,\n training_batch_size: int = 50,\n learning_rate: float = 5e-4,\n validation_fraction: float = 0.1,\n stop_after_epochs: int = 20,\n max_num_epochs: Optional[int] = None,\n clip_max_norm: Optional[float] = 5.0,\n exclude_invalid_x: bool = True,\n discard_prior_samples: bool = False,\n retrain_from_scratch_each_round: bool = False,\n device='cpu'\n ) -> NeuralPosterior:\n r\"\"\"Run SNLE.\n\n Return posterior $p(\\theta|x)$ after inference.\n\n Args:\n num_simulations: Number of simulator calls.\n proposal: Distribution that the parameters $\\theta$ are drawn from.\n `proposal=None` uses the prior. Setting the proposal to a distribution\n targeted on a specific observation, e.g. a posterior $p(\\theta|x_o)$\n obtained previously, can lead to less required simulations.\n training_batch_size: Training batch size.\n learning_rate: Learning rate for Adam optimizer.\n validation_fraction: The fraction of data to use for validation.\n stop_after_epochs: The number of epochs to wait for improvement on the\n validation set before terminating training.\n max_num_epochs: Maximum number of epochs to run. 
If reached, we stop\n training even when the validation loss is still decreasing. If None, we\n train until validation loss increases (see also `stop_after_epochs`).\n clip_max_norm: Value at which to clip the total gradient norm in order to\n prevent exploding gradients. Use None for no clipping.\n exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`\n during training. Expect errors, silent or explicit, when `False`.\n discard_prior_samples: Whether to discard samples simulated in round 1, i.e.\n from the prior. Training may be sped up by ignoring such less targeted\n samples.\n retrain_from_scratch_each_round: Whether to retrain the conditional density\n estimator for the posterior from scratch each round.\n\n Returns:\n Posterior $p(\\theta|x_o)$ that can be sampled and evaluated.\n \"\"\"\n kwargs = del_entries(locals(), entries=(\"self\", \"__class__\"))\n return super().__call__(**kwargs)\n", "sub_path": "sbi/inference/snle/snle_a.py", "file_name": "snle_a.py", "file_ext": "py", "file_size_in_byte": 6953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sbi.inference.snle.snle_base.LikelihoodEstimator", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "sbi.types.TensorboardSummaryWriter", "line_number": 25, "usage_type": "name"}, {"api_name": "sbi.utils.del_entries", "line_number": 79, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 91, "usage_type": "name"}, {"api_name": "sbi.utils.del_entries", "line_number": 128, "usage_type": "call"}, {"api_name": "sbi.inference.posteriors.base_posterior.NeuralPosterior", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "204708216", "text": "from ftw.builder import Builder\nfrom ftw.builder import create\nfrom opengever.testing import IntegrationTestCase\nfrom plone.locking.interfaces import IRefreshableLockable\nfrom zExceptions import Unauthorized\n\n\nclass TestDocumentQuickupload(IntegrationTestCase):\n\n def test_raises_unauthorized_when_document_is_not_checked_out(self):\n self.login(self.regular_user)\n with self.assertRaises(Unauthorized):\n create(Builder('quickuploaded_document')\n .within(self.document)\n .with_data('text'))\n\n def test_raises_unauthorized_when_document_is_locked(self):\n self.login(self.regular_user)\n IRefreshableLockable(self.document).lock()\n with self.assertRaises(Unauthorized):\n create(Builder('quickuploaded_document')\n .within(self.document)\n .with_data('text'))\n\n def test_file_is_updated(self):\n self.login(self.regular_user)\n self.checkout_document(self.document)\n create(Builder('quickuploaded_document')\n .within(self.document)\n .with_data('NEW DATA'))\n self.assertEquals('NEW DATA', 
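A minimal usage sketch matching the constructor and __call__ signatures defined in the SNLE_A entry above. The toy simulator, the BoxUniform prior, and the top-level import path are assumptions for illustration, not part of the entry:

import torch
from sbi.inference import SNLE_A   # assumed export; the class lives in sbi/inference/snle/snle_a.py
from sbi.utils import BoxUniform

def simulator(theta):
    # Toy likelihood: x ~ N(theta, 0.1); operates on whole batches of parameter sets.
    return theta + 0.1 * torch.randn_like(theta)

prior = BoxUniform(low=-2 * torch.ones(2), high=2 * torch.ones(2))

snle = SNLE_A(simulator, prior, simulation_batch_size=100, show_progress_bars=False)
posterior = snle(num_simulations=500)                  # simulate, then train the likelihood estimator
samples = posterior.sample((100,), x=torch.zeros(2))   # MCMC sampling at the observation x_o = 0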
self.document.file.data)\n\n def test_uses_existing_filename_but_new_extension(self):\n self.login(self.regular_user)\n self.checkout_document(self.document)\n create(Builder('quickuploaded_document')\n .within(self.document)\n .with_data('NEW DATA', filename='test.pdf'))\n self.assertEquals('Vertraegsentwurf.pdf', self.document.file.filename)\n", "sub_path": "opengever/document/tests/test_quickupload.py", "file_name": "test_quickupload.py", "file_ext": "py", "file_size_in_byte": 1570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "opengever.testing.IntegrationTestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "zExceptions.Unauthorized", "line_number": 12, "usage_type": "argument"}, {"api_name": "ftw.builder.create", "line_number": 13, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 13, "usage_type": "call"}, {"api_name": "plone.locking.interfaces.IRefreshableLockable", "line_number": 19, "usage_type": "call"}, {"api_name": "zExceptions.Unauthorized", "line_number": 20, "usage_type": "argument"}, {"api_name": "ftw.builder.create", "line_number": 21, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 21, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 28, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 28, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 36, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "578905121", "text": "\n# Native modules\nimport logging\nfrom typing import Optional, List, Callable\nfrom collections import namedtuple, OrderedDict\n\n# Package modules\nfrom .helpers import update_all_stops_from_getters, debug_functions, function_to_str, UpdateAllStopsFromGettersReturn\nfrom .exceptions import *\nfrom .assets import *\n\n__all__ = [\"PyBuses\", \"BusSortMethods\", \"logger\"]\n\nlogger = logging.getLogger(\"pybuses.core\")\nlog = logger\n\n\n\"\"\"Bus Sorting Methods are ints, but must be used with the aliases from the BusSortMethods Named Tuple.\nNONE = 0\nTIME = 1\nLINE = 2\nROUTE = 3\nLINE_INT = 4\nMIXED_LINE_ROUTE = 5\nMIXED_LINE_INT_ROUTE = 6\n\"\"\"\n_bus_sort_methods_namedtuple = namedtuple(\"BusSortMethods\", [\"NONE\", \"TIME\", \"LINE\", \"ROUTE\"])\nBusSortMethods = _bus_sort_methods_namedtuple(0, 1, 2, 3)\n\n\nclass PyBuses(object):\n \"\"\"A PyBuses object to help managing bus stops and look for incoming buses.\n This object should be threated as a concrete transport service, i.e.:\n - \"the bus service of King's Landing\"\n - \"the metro service of Liberty City\"\n - \"the train service of Hamburg\"\n - \"the bus service of Hamburg\"\n Each one of these services would have a PyBuses object, with their getters, setters and deleters.\n\n Getters are custom, required functions that fetch Stop info and Bus lists from certain sources.\n At least one Stop Getter and one Bus Getter are required to fetch stops and buses respectively.\n\n Setters are custom, optional functions, that will save found Stops or list of buses to custom destinations\n (i.e. 
a database, local variables, a file, cache...).\n Setters are supposed to be executed after a Stop or Bus query is successful, but this behaviour can be customized.\n\n Deleters are custom, optional functions, that will delete saved Stops or list of buses on custom destinations,\n usually the same places as Setters.\n\n The PyBuses methods that use these functions are:\n - Stop Getters: find_stop()\n - Stop Setters: save_stop()\n - Stop Deleters: delete_stop()\n - Bus Getters: get_buses()\n - Bus Setters: save_buses()\n - Bus Deleters: delete_buses()\n Bus Setters and Bus Deleters functionality is not implemented yet.\n\n Please refer to documentation in order to check how Getter, Setter and Deleter functions must work.\n \"\"\"\n\n def __init__(\n self,\n use_all_stop_setters: bool = False,\n use_all_bus_setters: bool = False,\n use_all_stop_deleters: bool = True,\n use_all_bus_deleters: bool = True,\n auto_save_stop: bool = False\n ):\n \"\"\"\n :param use_all_stop_setters: if True, use all the defined Stop Setters when saving a Stop (default=False)\n :param use_all_bus_setters: if True, use all the defined Bus Setters when saving a Bus (default=False)\n :param use_all_stop_deleters: if True, use all the defined Stop Deleters when deleting a Stop (default=True)\n :param use_all_bus_deleters: if True, use all the defined Bus Deleters when deleting a Bus (default=True)\n :param auto_save_stop: if True, when searching a Stop through an Online getter,\n save it on the Setter/s automatically (default=False)\n :type use_all_stop_setters: bool\n :type use_all_bus_setters: bool\n :type use_all_stop_deleters: bool\n :type use_all_bus_deleters: bool\n \"\"\"\n self.stop_getters: OrderedDict[Callable, bool] = OrderedDict() # { StopGetter : online(True/False) }\n self.stop_setters: List[Callable] = list()\n self.stop_deleters: List[Callable] = list()\n self.bus_getters: List[Callable] = list()\n self.bus_setters: List[Callable] = list()\n self.bus_deleters: List[Callable] = list()\n self.use_all_stop_setters: bool = use_all_stop_setters\n self.use_all_bus_setters: bool = use_all_bus_setters\n self.use_all_stop_deleters: bool = use_all_stop_deleters\n self.use_all_bus_deleters: bool = use_all_bus_deleters\n self.auto_save_stop: bool = auto_save_stop\n\n # <log\n log.debug(\n \"Initialized a new PyBuses instance with the following configuration:\\n\"\n f\" * {len(self.stop_getters)} Stop Getters defined: {debug_functions(self.stop_getters)}\\n\"\n f\" * {len(self.stop_setters)} Stop Setters defined: {debug_functions(self.stop_setters)}\\n\"\n f\" * {len(self.stop_deleters)} Stop Deleters defined: {debug_functions(self.stop_deleters)}\\n\"\n f\" * {len(self.bus_getters)} Bus Getters defined: {debug_functions(self.bus_getters)}\\n\"\n f\" * {len(self.bus_setters)} Bus Setters defined: {debug_functions(self.bus_setters)}\\n\"\n f\" * {len(self.bus_deleters)} Bus Deleters defined: {debug_functions(self.bus_deleters)}\\n\"\n f\" * Using all Stop Setters: {self.use_all_stop_setters}\\n\"\n f\" * Using all Bus Setters: {self.use_all_bus_setters}\\n\"\n f\" * Using all Stop Deleters: {self.use_all_stop_deleters}\\n\"\n f\" * Using all Bus Deleters: {self.use_all_bus_deleters}\\n\"\n f\" * Auto Save Stops: {self.auto_save_stop}\\n\"\n )\n # log>\n\n def find_stop(self, stopid: int, online: Optional[bool] = None, auto_save: Optional[bool] = None) -> Stop:\n \"\"\"Find a Stop using the defined Stop Getters on this PyBuses instances.\n If no Stop Getters are defined, MissingGetters exception is raised.\n 'online' 
parameter define if using Online or Offline getters, or all of them (using Offline getters first)\n :param stopid: ID of the Stop to find\n :param online: if True, only search on Online Stop Getters |\n if False, only search on Offline Stop Getters |\n if None, search on all Stop Getters, but search on Offline Getters first (default=None)\n :param auto_save: if True and the Stop was fetched by an Online getter, save it using the 'save_stop' method |\n if False, do nothing about it |\n if None, use default value on this PyBuses instance (default=None) |\n Exceptions produced by the Stop Setters will be ignored and not raised;\n Additional parameters of the 'save_stop' method will be used as defaulted\n :type stopid: int\n :type online: bool or None\n :type auto_save: bool\n :return: Stop object\n :rtype: list of Stop or False or Exception\n :raise: MissingGetters if no getters are defined |\n StopNotFound if the stop could not be found by any of the getters, and all the getters worked fine |\n StopNotExist if a getter raised this exception (the getter is sure that the stop does not exist) |\n StopGetterUnavailable if the stop could not be fetched from any getter, no getter raised StopNotExist,\n and at least one getter raised StopGetterUnavailable\n \"\"\"\n getters: List[Callable] = self.get_stop_getters(online=online, sort=True, online_first=False)\n if auto_save is None:\n # Get instance auto_save parameter if not defined on this method call\n auto_save = self.auto_save_stop\n\n if not getters:\n msg = \"No Stop getters defined on this PyBuses instance\"\n log.warning(msg)\n raise MissingGetters(msg)\n else:\n # <log\n log.debug(\n f\"Searching Stop ID #{stopid} having {len(getters)} getters available\"\n f\" ({'Online only' if online else ('Offline only' if online == False else 'Online & Offline')}, \"\n f\"{'Auto-Saving' if auto_save else 'Not Auto-Saving'})\"\n )\n # log>\n\n errors = False # set to True if any getter raised StopGetterUnavailable\n for getter in getters: # type: Callable\n try:\n stop = getter(stopid)\n\n # If the Getter raises StopNotExist exception, it will not be catched here\n # because it is confirmed by a trusted getter that the stop does not exist\n # and that exception must be catched by the caller to know that situation\n\n except StopNotFound:\n log.debug(f\"Stop ID #{stopid} not found using the Getter {function_to_str(getter)}\")\n continue\n\n except StopNotExist as ex:\n log.debug(f\"Stop ID #{stopid} reported as not existing by the Getter {function_to_str(getter)}\")\n raise ex\n # raise StopNotExist from ex\n\n except StopGetterUnavailable as ex:\n # <log\n log.warning(\n f\"Stop ID #{stopid} could not be found using the getter {function_to_str(getter)}\"\n \"\\n{ex}\" if str(ex) else \"\"\n )\n # log>\n errors = True\n continue\n\n else: # Stop found\n # <log\n log.debug(\n f\"Stop ID#{stopid} found using the Getter {function_to_str(getter)}!\\n\"\n f\"Stop Name: {stop.name}\\n\"\n f\"Stop Location: {stop.lat}, {stop.lon}\"\n f\"\\nStop additional info: {stop.other}\" if stop.other else \"\"\n )\n # log>\n\n if auto_save and getter in self.get_stop_getters(online=True):\n # Auto-Save stop if required and was found by an Online getter\n log.debug(f\"Stop ID #{stopid} must be saved on the Stop Setter/s available\")\n # TODO This should run threaded to avoid blocking the return while saving the Stop\n try:\n self.save_stop(stop)\n except SetterException:\n pass # QUESTION add log here? 
or just log on save_stop method?\n\n return stop\n\n if errors:\n # Stop not found, and Errors on one or more getters\n msg = \"Stop info{} could not be retrieved for any of the Stop getters available\"\n log.warning(msg.format(f\" for StopID #{stopid}\"))\n raise StopGetterUnavailable(msg.format(\"\"))\n\n else:\n # Stop not found, but No errors on the getters\n msg = \"Stop{} not found by any of the Stop getters available\"\n log.info(msg.format(f\" ID #{stopid}\"))\n raise StopNotFound(msg.format(\"\"))\n\n def save_stop(self, stop: Stop, update: bool = True, use_all_stop_setters: Optional[bool] = None):\n \"\"\"Save the provided Stop object on the Stop setters defined.\n The stop will only be saved on the first Setter where the Stop is saved successfully,\n unless 'use_all_stop_setters' attribute of PyBuses class or on this method is True.\n If no Stop Setters are defined, MissingSetters exception is raised.\n If none of the Stop setters worked, StopSetterUnavailable exception is raised.\n If the stop is saved on at least one Stop Setter, the Stop is considered successfully saved.\n :param stop: Stop object to save\n :param update: if True, when the Stop currently exists on a Setter data destination,\n update/overwrite stop on destination with the current data of the Stop provided (default=True)\n :param use_all_stop_setters: if True, save the Stop on all the Stop Setters |\n if False, save the Stop on the first Stop Setter where it is saved successfully |\n if None, use the value declared on this PyBuses instance (default=None)\n :type stop: Stop\n :type update: bool\n :type use_all_stop_setters: bool or None\n :raise: MissingSetters | StopSetterUnavailable\n \"\"\"\n setters: List[Callable] = self.get_stop_setters()\n if use_all_stop_setters is None:\n # Get instance use_all_stop_setters parameter if not defined on this method call\n use_all_stop_setters = self.use_all_stop_setters\n\n if not setters:\n msg = \"No Stop setters defined on this PyBuses instance\"\n log.warning(msg)\n raise MissingSetters(msg)\n else:\n # <log\n log.debug(\n f\"Saving Stop ID #{stop.stopid} having {len(setters)} setters available\"\n f\" ({'Update: YES' if update else 'Update: NO'}, \"\n f\"\"\"use_all_stop_setters={\n 'Using all Stop Setters' if use_all_stop_setters else 'Using only the first successful Stop Setter'\n })\"\"\"\n )\n # log>\n\n success = False # Variable to know if the Stop was successfully saved on at least one Stop Setter\n for setter in setters: # type: Callable\n try:\n setter(stop, update=update)\n\n except StopSetterUnavailable as ex:\n # <log\n log.warning(\n f\"Stop ID #{stop.stopid} ({stop.name}) \"\n f\"could not be saved using the setter {function_to_str(setter)}\"\n f\"\\n{ex}\" if str(ex) else \"\"\n )\n # log>\n\n continue\n\n else:\n success = True\n log.info(f\"Stop ID #{stop.stopid} ({stop.name}) saved successfully \"\n f\"using the setter {function_to_str(setter)}\")\n if not use_all_stop_setters:\n break\n\n if not success:\n msg = \"Stop{} could not be saved on any of the Stop setters defined\"\n log.warning(msg.format(f\" ID #{stop.stopid} ({stop.name}\"))\n raise StopSetterUnavailable(msg.format(\"\"))\n\n def delete_stop(self, stopid: int, use_all_stop_deleters: Optional[bool] = None):\n \"\"\"Delete the stop that matches the given Stop ID using the defined Stop Deleters.\n The stop will only be deleted on the first Deleter where the Stop was deleted successfully,\n unless use_all_stop_deleters attribute of PyBuses class is True (which is by default).\n No 
exceptions will be raised if the stop was not deleted because it was not registered.\n Only when all the Deleters themselves failed, StopDeleterUnavailable will be raised.\n If no Stop Deleters are defined, MissingDeleters exception is raised.\n :param stopid: Stop ID of the Stop to delete\n :param use_all_stop_deleters: if True, delete the Stop using all the Stop Deleters\n (default=use the value declared on this PyBuses instance)\n :type stopid: int\n :type use_all_stop_deleters: bool\n :raise: MissingDeleters | StopDeleterUnavailable\n \"\"\"\n deleters: List[Callable] = self.get_stop_deleters()\n\n if not deleters:\n raise MissingDeleters(\"No Stop deleters defined on this PyBuses instance\")\n else:\n log.debug(f\"Removing Stop ID #{stopid} having {len(deleters)} deleters available\")\n\n success = False # Variable to know at the end if the stop was successfully deleted with at least one deleter\n if use_all_stop_deleters is None:\n # Get instance use_all_stop_setters parameter if not defined on this method call\n use_all_stop_deleters = self.use_all_stop_deleters\n\n for deleter in deleters: # type: Callable\n try:\n deleter(stopid)\n except StopDeleterUnavailable:\n log.warning(f\"Stop ID #{stopid} could not be removed using the deleter {function_to_str(deleter)}\")\n continue\n else:\n success = True\n if not use_all_stop_deleters:\n break\n\n if not success:\n raise StopDeleterUnavailable(\"Stop could not be deleted with any of the Stop deleters defined\")\n\n def update_all_stops_from_getters(\n self,\n end: int,\n start: int = 1,\n threads: int = 0,\n update: bool = True,\n use_all_stop_setters: Optional[bool] = None\n ) -> UpdateAllStopsFromGettersReturn:\n \"\"\"Find all the stops when the online resources do not provide a full list of Stops.\n This method will manually search the Stops by ID sequentially\n between the 'start' and 'end' ranges of Stop IDs using the available Online Stop Getters.\n The method can run on the background using threads.\n The 'threads' parameter define how many threads will be used. 
By default is 0, which means use no threads.\n All the threads start on the method, and a list with all these created Threads is returned.\n All the found stops will be saved/updated using the Stop setters defined on the PyBuses instance.\n If no Stop Getters are defined, MissingGetters exception is raised.\n If no Stop Setters are defined, MissingSetters exception is raised.\n :param end: Stop ID limit to search\n :param start: First Stop ID to search (default=1)\n :param threads: number of threads to use (default=0: use no threads)\n :param update: if True, when a found Stop currently exists on a Setter data destination,\n update stop on destination with the current data of the Stop provided (default=True)\n :param use_all_stop_setters: if True, try to save each Stop on each one of the available Stop Setters\n (default=None: use the value declared on this PyBuses instance)\n :type end: int\n :type start: int\n :type threads: int\n :type update: bool\n :type use_all_stop_setters: bool or None\n :raise: MissingGetters or MissingSetters\n :return: The Namedtuple 'UpdateAllStopsFromGettersReturn' from helpers, which contains:\n 1.- 'created_threads': List of created threads (if no threads are used, empty list)\n 2.- 'error_stops': List with the Stop IDs of Stops that could not be fetched from any of the Getters\n 3.- 'non_saved_stops': List with the Stop IDs of Stops that could not be saved on any of the Setters\n 4.- 'threads_stop_event': Stop Event to stop all the threads created\n :rtype: UpdateAllStopsFromGettersReturn\n When using threads, the lists will be returned empty, but can be filled with stop IDs during threads execution.\n \"\"\"\n getters: List[Callable] = self.get_stop_getters(online=True) # Get all Online getters\n if not getters:\n msg = \"No Stop getters defined on this PyBuses instance, so cannot perform Update all stops from getters\"\n log.warning(msg)\n raise MissingGetters(msg)\n if not self.get_stop_setters():\n msg = \"No Stop setters defined on this PyBuses instance, so cannot perform Update all stops from getters\"\n log.warning(msg)\n raise MissingSetters(msg)\n return update_all_stops_from_getters(\n pybuses=self,\n use_all_stop_setters=use_all_stop_setters,\n start=start,\n end=end,\n update=update,\n threads=threads\n )\n\n def get_buses(self, stopid: int, sort_by: Optional[int] = BusSortMethods.TIME, reverse: bool = False) -> List[Bus]:\n \"\"\"Get a live list of all the Buses coming to a certain Stop and the remaining time until arrival.\n The bus list can be sorted using the 'sort_by' and one of the methods available on 'BusSortMethods'.\n By default, buses list is sorted by Time, from shorter time (first to arrive) to greater time (last to arrive).\n If no Bus Getters are defined, MissingGetters exception is raised.\n :param stopid: ID of the Stop to get the list of the incoming buses of\n :param sort_by: method used to sort buses; use constants available in 'BusSortMethods' (default=TIME).\n If None, the buses list will not be sorted.\n :param reverse: if True, reverse sort the buses (default=False)\n :type stopid: int\n :type sort_by: int or None\n :type reverse: bool\n :return: List of Buses\n :rtype: List[Bus]\n :raise: MissingGetters or StopNotExist or StopNotFound or BusGetterUnavailable\n \"\"\"\n getters: List[Callable] = self.get_bus_getters()\n\n if not getters:\n msg = \"No Bus getters defined on this PyBuses instance\"\n log.warning(msg)\n raise MissingGetters(msg)\n else:\n # <log\n log.debug(\n f\"Getting Buses for Stop ID #{stopid} having 
{len(getters)} getters available, \"\n f\"sort by {BusSortMethods._fields[list(BusSortMethods).index(sort_by)]} {'Reversed' if reverse else ''}\"\n if sort_by is not None else \"no sort\"\n )\n # log>\n\n for getter in getters: # type: Callable\n try:\n buses: List[Bus] = getter(stopid)\n if sort_by == BusSortMethods.TIME:\n buses.sort(key=lambda x: x.time, reverse=reverse)\n elif sort_by == BusSortMethods.LINE:\n buses.sort(key=lambda x: x.line, reverse=reverse)\n elif sort_by == BusSortMethods.ROUTE:\n buses.sort(key=lambda x: x.route, reverse=reverse)\n # <log\n msg = f\"Found {len(buses)} buses coming to the Stop ID #{stopid}\"\n if buses:\n for bus in buses:\n msg += f\"\\n{bus.line} ({bus.route}) - Time: {bus.time}\"\n log.debug(msg)\n # log>\n return buses\n\n except BusGetterUnavailable:\n # <log\n log.warning(\n f\"Buses for Stop ID #{stopid} could not be fetched \"\n f\"using the getter {function_to_str(getter)}\"\n )\n # log>\n continue\n\n msg = \"Bus list{} could not be retrieved with any of the Bus getters defined\"\n log.warning(msg.format(f\"for Stop ID #{stopid}\"))\n raise BusGetterUnavailable(msg.format(\"\"))\n\n def save_buses(self):\n \"\"\"Mock function that would save a list of buses to a local storage. Not implemented.\n :return:\n \"\"\"\n pass\n\n def delete_buses(self):\n \"\"\"Mock function that would delete a list of buses from a local storage. Not implemented.\n :return:\n \"\"\"\n pass\n\n def add_stop_getter(self, getter, online: bool = False):\n \"\"\"Add one Stop Getter to this PyBuses instance.\n It should be known if the Stop Getter is connected to a Online or a Offline source,\n and set the 'online' arg to True/False depending on it.\n :param getter: one or more StopGetter functions/methods\n :param online: boolean to define if the Getter is connected or not to a Online source (default=False)\n :type getter: StopGetter\n :type online: bool\n \"\"\"\n self.add_stop_getters(getter, online=online)\n\n def add_stop_getters(self, *args, **kwargs):\n \"\"\"Add multiple Stop Getters to this PyBuses instance.\n It should be known if the Stop Getters are connected to a Online or a Offline source,\n and set the 'online' kwarg to True/False depending on it.\n Online and Offline Getters should not be mixed on a single call to this method.\n :param args: one or more StopGetter functions/methods\n :param kwargs: online=True/False (if no value = False)\n \"\"\"\n online = kwargs.get(\"online\", False)\n for f in args: # type: Callable\n self.stop_getters[f] = bool(online)\n # <log\n log.debug(\n f\"Added {len(args)} new {'Online' if online else 'Offline'} Stop Getter. \"\n f\"Current list of Stop Getters:\\n{debug_functions(self.stop_getters)}\"\n )\n # log>\n\n def add_stop_setter(self, f: Callable):\n \"\"\"Add one Stop Setter to this PyBuses instance.\n :param f: one Stop Setter function/method\n :type f: function or class method\n \"\"\"\n self.stop_setters.append(f)\n log.debug(\n f\"Added a new Stop Setter. Current list of Stop Setters:\\n\"\n f\"{debug_functions(self.stop_setters)}\"\n )\n\n def add_stop_deleter(self, f: Callable):\n \"\"\"Add one Stop Deleter to this PyBuses instance.\n :param f: one Stop Deleter function/method\n :type f: function or class method\n \"\"\"\n self.stop_deleters.append(f)\n log.debug(\n f\"Added a new Stop Deleter. 
Current list of Stop Deleters:\\n\"\n f\"{debug_functions(self.stop_deleters)}\"\n )\n\n def add_bus_getter(self, f: Callable):\n \"\"\"Add one Bus Getter to this PyBuses instance.\n :param f: one Bus Getter function/method\n :type f: function or class method\n \"\"\"\n self.bus_getters.append(f)\n log.debug(\n f\"Added a new Bus Getter. Current list of Bus Getters:\\n\"\n f\"{debug_functions(self.bus_getters)}\"\n )\n\n def add_bus_setter(self, f: Callable):\n \"\"\"Add one Bus Setter to this PyBuses instance.\n :param f: one Bus Setter function/method\n :type f: function or class method\n \"\"\"\n self.bus_setters.append(f)\n log.debug(\n f\"Added a new Bus Setter. Current list of Bus Setters:\\n\"\n f\"{debug_functions(self.bus_setters)}\"\n )\n\n def add_bus_deleter(self, f: Callable):\n \"\"\"Add one Bus Deleter to this PyBuses instance.\n :param f: one Bus Deleter function/method\n :type f: function or class method\n \"\"\"\n self.bus_deleters.append(f)\n log.debug(\n f\"Added a new Bus Deleter. Current list of Bus Deleters:\\n\"\n f\"{debug_functions(self.bus_deleters)}\"\n )\n\n def get_stop_getters(\n self, online: Optional[bool] = None,\n sort: bool = True, online_first: bool = False\n ) -> List[Callable]:\n \"\"\"Get all the Stop Getters available on this PyBuses instance.\n The 'online' parameter can be specified to get only Online or Offline getters,\n otherwise all available getters will be returned.\n :param online: if True, get only getters connected to a Online source |\n if False, get only getters connected to a Offline source |\n if None, get both Online and Offline getters (default=None)\n :param sort: when getting both Online and Offline getters,\n return them sorted by the Online getters first, then Offline getters, or vice versa,\n depending on the 'online_first' parameter (default=True)\n :param online_first: when sorting the getters, if True, sort by the Online getters first, then Offline getters;\n if False, sort by the Offline getters first, then Online getters (default=False)\n :type online: bool | None\n :type sort: bool\n :type online_first: bool\n :return: list of available Stop Getters\n :rtype: List[Callable]\n \"\"\"\n if online is None:\n getters = list(self.stop_getters.keys())\n if sort:\n return sorted(getters, key=lambda k: self.stop_getters[k], reverse=online_first) # TODO Test sorted\n else:\n return getters\n else:\n return [g for g in self.stop_getters.keys() if self.stop_getters[g] == bool(online)]\n\n def get_stop_setters(self) -> List[Callable]:\n \"\"\"Get all the Stop Setters available on this PyBuses instance.\n :return: list of available Stop Setters\n :rtype: List[Callable]\n \"\"\"\n return self.stop_setters\n\n def get_stop_deleters(self) -> List[Callable]:\n \"\"\"Get all the Stop Deleters available on this PyBuses instance.\n :return: list of available Stop Deleters\n :rtype: List[Callable]\n \"\"\"\n return self.stop_deleters\n\n def get_bus_getters(self) -> List[Callable]:\n \"\"\"Get all the Bus Getters available on this PyBuses instance.\n :return: list of available Bus Getters\n \"\"\"\n return self.bus_getters\n\n def get_bus_setters(self) -> List[Callable]:\n \"\"\"Get all the Bus Setters available on this PyBuses instance.\n :return: list of available Bus Setters\n :rtype: List[Callable]\n \"\"\"\n return self.bus_setters\n\n def get_bus_deleters(self) -> List[Callable]:\n \"\"\"Get all the Bus Deleters available on this PyBuses instance.\n :return: list of available Bus Deleters\n :rtype: List[Callable]\n \"\"\"\n return 
self.bus_deleters\n", "sub_path": "pybuses/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 28853, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 27, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 87, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 97, "usage_type": "call"}, {"api_name": "helpers.debug_functions", "line_number": 98, "usage_type": "call"}, {"api_name": "helpers.debug_functions", "line_number": 99, "usage_type": "call"}, {"api_name": "helpers.debug_functions", "line_number": 100, "usage_type": "call"}, {"api_name": "helpers.debug_functions", "line_number": 101, "usage_type": "call"}, {"api_name": "helpers.debug_functions", "line_number": 102, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 135, "usage_type": "name"}, {"api_name": "helpers.function_to_str", "line_number": 163, "usage_type": "call"}, {"api_name": "helpers.function_to_str", "line_number": 167, "usage_type": "call"}, {"api_name": "helpers.function_to_str", "line_number": 174, "usage_type": "call"}, {"api_name": "helpers.function_to_str", "line_number": 184, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 214, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 232, "usage_type": "name"}, {"api_name": "helpers.function_to_str", "line_number": 261, "usage_type": "call"}, {"api_name": "helpers.function_to_str", "line_number": 271, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 280, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 294, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 294, "usage_type": "name"}, {"api_name": "helpers.function_to_str", "line_number": 310, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 326, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 358, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 358, "usage_type": "name"}, {"api_name": "helpers.update_all_stops_from_getters", "line_number": 367, "usage_type": "call"}, {"api_name": "helpers.UpdateAllStopsFromGettersReturn", "line_number": 327, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 376, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 392, 
"usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 392, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 409, "usage_type": "name"}, {"api_name": "helpers.function_to_str", "line_number": 429, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 376, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 475, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 479, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 487, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 490, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 498, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 501, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 509, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 512, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 520, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 523, "usage_type": "name"}, {"api_name": "helpers.debug_functions", "line_number": 531, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 535, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 537, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 537, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 564, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 564, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 571, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 571, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 578, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 578, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 584, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 584, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 591, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 591, "usage_type": "name"}]} +{"seq_id": "285051211", "text": "import math\nimport numpy as np\nimport numpy.fft as fft\nfrom scipy.interpolate import interp1d\nimport torch\nimport torchaudio\n\nclass AudioTransformSpectrogram:\n def __init__(self, size, audio_channels, mel_scale=False):\n self.size = size\n self.audio_channels = audio_channels\n self.channels = audio_channels * 2\n self.mel_scale = mel_scale\n\n self.fft_size = size * 2\n self.hop_size = size * 2\n self.fft_out = self.fft_size // 2 + 1\n\n def __call__(self, audio, reverse=False):\n y = np.abs(np.fft.fftfreq(self.fft_size)[1:self.fft_out]) * Audio.rate\n y = torch.tensor(y).float()\n mel_y = np.log(1 + y / 1000) * 1000 / np.log(2)\n new_y = np.linspace(min(mel_y), max(mel_y), self.size)\n if reverse:\n # Flip y\n audio = torch.flip(audio, [2])\n # Convert frequency axis to linear scale\n if self.mel_scale:\n interpolate = interp1d(new_y, audio, fill_value='extrapolate')\n audio = torch.tensor(interpolate(mel_y), dtype=torch.float32)\n # Separate\n mag = audio[0:2]\n phase = audio[2:4]\n # Unscale\n mag = torch.exp(mag * math.log(self.size))\n phase = phase * math.pi\n # Extend time to account for samples that were clipped in FFT\n new_mag = torch.zeros(\n self.audio_channels, self.size + 1, self.fft_out)\n new_phase = torch.zeros(\n self.audio_channels, self.size + 1, self.fft_out)\n new_mag[:, :self.size, :self.size] = mag\n new_phase[:, 
:self.size, :self.size] = phase\n mag = new_mag\n phase = new_phase\n # Permute dimensions\n mag = mag.permute(0, 2, 1)\n phase = phase.permute(0, 2, 1)\n # Combine magnitude and phase\n sine = mag * torch.sin(phase)\n cosine = mag * torch.cos(phase)\n audio = torch.stack([cosine, sine]).permute(1, 2, 3, 0);\n # Perform inverse FFT\n audio = torchaudio.functional.istft(\n audio, self.fft_size, self.hop_size, center=False,\n length = 2 * self.fft_out * self.size)\n else:\n # Perform FFT\n audio = torch.stft(\n audio, self.fft_size, self.hop_size, center=False)\n # Get magnitude and phase\n mag, phase = torchaudio.functional.magphase(audio)\n # Permute dimensions\n mag = mag.permute(0, 2, 1)\n phase = phase.permute(0, 2, 1)\n # Clip length and Nyquist frequency\n mag = mag[:, :self.size, :self.size]\n phase = phase[:, :self.size, :self.size]\n # Scale\n mag = torch.log(mag) / math.log(self.size)\n mag = torch.clamp(mag, -1, 1)\n phase = phase / math.pi\n # Combine\n audio = torch.cat([mag, phase])\n # Convert frequency axis to mel scale\n if self.mel_scale:\n interpolate = interp1d(mel_y, audio, fill_value='extrapolate')\n audio = torch.tensor(interpolate(new_y), dtype=torch.float32)\n # Flip y\n audio = torch.flip(audio, [2])\n\n return audio\n\nclass AudioTransform:\n def __init__(self, size, audio_channels):\n self.size = size\n self.audio_channels = audio_channels\n self.channels = audio_channels\n\n def __call__(self, audio, reverse=False):\n if reverse:\n # To 1-D\n audio = audio.contiguous().view(\n self.audio_channels, self.size * self.size)\n else:\n # Clip length\n audio = audio[:, :self.size * self.size]\n # To 2-D\n audio = audio.contiguous().view(\n self.audio_channels, self.size, self.size)\n # Normalize\n min = audio.min().abs()\n max = audio.max().abs()\n audio = audio / (1e-9 + torch.max(min, max))\n\n return audio\n\nclass Audio:\n rate = 44100\n full_length = 30\n\n def __init__(self, size=256, audio_channels=2, spectrogram=False):\n self.length = Audio.length(size, spectrogram)\n t_func = AudioTransformSpectrogram if spectrogram else AudioTransform\n self.transform = t_func(size, audio_channels)\n self.channels = self.transform.channels\n\n # Get length in seconds\n def length(size, spectrogram):\n return 3 * (size / 256) ** 2 * (1 if spectrogram else 0.5)\n\nclass AudioReader(Audio):\n def __init__(self, size=256, audio_channels=2, spectrogram=False, offset=0):\n super(AudioReader, self).__init__(size, audio_channels, spectrogram)\n assert offset >= 0 and offset < Audio.full_length // self.length\n cue = offset * self.length\n self.chain = torchaudio.sox_effects.SoxEffectsChain()\n self.chain.append_effect_to_chain('rate', [str(Audio.rate)])\n self.chain.append_effect_to_chain('channels', [str(audio_channels)])\n self.chain.append_effect_to_chain('pad', ['0', str(Audio.full_length)])\n self.chain.append_effect_to_chain('trim', [str(cue), str(self.length)])\n self.dim = [audio_channels, self.length * Audio.rate]\n\n def __call__(self, path):\n self.chain.set_input_file(path)\n try:\n audio, _ = self.chain.sox_build_flow_effects()\n except RuntimeError:\n audio = torch.zeros(self.dim)\n audio = self.transform(audio)\n return audio\n\nclass AudioWriter(Audio):\n output_rate = 44100\n\n def __init__(self, size=256, audio_channels=2, spectrogram=False):\n super(AudioWriter, self).__init__(size, audio_channels, spectrogram)\n self.resample = torchaudio.transforms.Resample(\n Audio.rate, AudioWriter.output_rate)\n\n def __call__(self, path, audio):\n audio = 
self.transform(audio, reverse=True)\n # This takes a long time\n audio = self.resample(audio)\n audio = audio / (1e-9 + torch.max(audio.min().abs(), audio.max().abs()))\n torchaudio.save(path, audio, AudioWriter.output_rate)\n", "sub_path": "data/audio.py", "file_name": "audio.py", "file_ext": "py", "file_size_in_byte": 6151, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.abs", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.fft.fftfreq", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.flip", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 35, "usage_type": "call"}, {"api_name": "math.log", "line_number": 35, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.cos", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 52, "usage_type": "call"}, {"api_name": "torchaudio.functional.istft", "line_number": 54, "usage_type": "call"}, {"api_name": "torchaudio.functional", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.stft", "line_number": 59, "usage_type": "call"}, {"api_name": "torchaudio.functional.magphase", "line_number": 62, "usage_type": "call"}, {"api_name": "torchaudio.functional", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.log", "line_number": 70, "usage_type": "call"}, {"api_name": "math.log", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 71, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.flip", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 104, "usage_type": "call"}, {"api_name": "torchaudio.sox_effects.SoxEffectsChain", "line_number": 127, "usage_type": "call"}, {"api_name": "torchaudio.sox_effects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 139, "usage_type": "call"}, {"api_name": "torchaudio.transforms.Resample", "line_number": 148, "usage_type": "call"}, {"api_name": "torchaudio.transforms", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 155, "usage_type": "call"}, {"api_name": "torchaudio.save", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "153671055", "text": "import redis\nfrom threading import Thread\n\nif __name__ == \"__main__\":\n db = 
redis.Redis()\n    pubsub = db.pubsub()\n    pubsub.subscribe('chat')\n\n    def listen():\n        for item in pubsub.listen():\n            if isinstance(item['data'], bytes): # isinstance is the idiomatic type test\n                print('Message: ', item['data'].decode(\"utf-8\"))\n\n    listener = Thread(target=listen, args=())\n    listener.daemon = True\n    listener.start()\n\n    print('Write your message:')\n    while True:\n        message = input()\n        if message.lower() == 'quit':\n            exit()\n        db.publish('chat', message)", "sub_path": "redis_chat.py", "file_name": "redis_chat.py", "file_ext": "py", "file_size_in_byte": 575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "redis.Redis", "line_number": 5, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "34681223", "text": "import os\nimport logging\nimport time\n\nfrom flask import current_app as app, request, Blueprint\n\nfrom GRDomain.models import db, Domain\nfrom GRDomain.utils import DtalkBot\n\n\nbot = DtalkBot()\n\nviews = Blueprint('views', __name__)\n\n\n@views.route('/list', methods=['GET'])\ndef index():\n    return 'list page'\n\n\n@views.route('/')\ndef home():\n    return 'index page'\n\n\n@views.route('/status', methods=['GET', 'POST'])\ndef status():\n    uuid = request.form.get('uuid')\n    ip = request.form.get('ip')\n    prefix_subdomain = request.form.get('domain').split('.')[0]\n    chk_status = Domain.query.filter_by(uuid=uuid, ip=ip).first()\n    if chk_status and chk_status.prefix == prefix_subdomain and not chk_status.status: # guard: .first() returns None when no row matches\n        chk_status.status = True\n        db.session.commit()\n        db.session.close()\n        bot.msg(\"[{date}] {ip} - {subdomain} status up\".format(\n            date=time.strftime(\"%m/%d/%Y %X\"),\n            ip=ip,\n            subdomain=prefix_subdomain + '.' + app.config['DOMAIN']\n        ))\n        return 'update domain status ok'\n    db.session.close()\n    return 'nothing to do'\n\n\n", "sub_path": "GRDomain/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "GRDomain.utils.DtalkBot", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "GRDomain.models.Domain.query.filter_by", "line_number": 31, "usage_type": "call"}, {"api_name": "GRDomain.models.Domain.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "GRDomain.models.Domain", "line_number": 31, "usage_type": "name"}, {"api_name": "GRDomain.models.db.session.commit", "line_number": 34, "usage_type": "call"}, {"api_name": "GRDomain.models.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "GRDomain.models.db", "line_number": 34, "usage_type": "name"}, {"api_name": "GRDomain.models.db.session.close", "line_number": 35, "usage_type": "call"}, 
{"api_name": "GRDomain.models.db.session", "line_number": 35, "usage_type": "attribute"}, {"api_name": "GRDomain.models.db", "line_number": 35, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 39, "usage_type": "name"}, {"api_name": "GRDomain.models.db.session.close", "line_number": 42, "usage_type": "call"}, {"api_name": "GRDomain.models.db.session", "line_number": 42, "usage_type": "attribute"}, {"api_name": "GRDomain.models.db", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "239310420", "text": "import os \nimport datetime\nimport json\n\nDATABASEFILE = \"database.json\"\n\n\ndef deserialize():\n    tickets = {}\n    if os.path.exists(DATABASEFILE) and os.stat(DATABASEFILE).st_size > 0:\n        fin = open(DATABASEFILE)\n        tickets = json.load(fin)\n        fin.close()\n    return tickets\n\n\ndef serialize(tickets):\n    fout = open(DATABASEFILE, \"w\")\n    json.dump(tickets, fout)\n    fout.close()\n\n\ndef insertticket():\n    tickets = deserialize()\n    newTicketId = len(tickets) + 1\n    tickets[str(newTicketId)] = {  # key by str: json.dump/json.load round-trips all keys as strings\n        \"date\": str(datetime.datetime.now()),\n        \"used\": False\n    }\n    serialize(tickets)\n    return newTicketId\n\n\ndef checkout(id):\n    tickets = deserialize()\n    ticketValid = False\n    if str(id) in tickets:  # ids come back from the JSON file as strings\n        if not tickets[str(id)][\"used\"]:\n            tickets[str(id)][\"used\"] = True\n            ticketValid = True\n    serialize(tickets)\n    return ticketValid\n", "sub_path": "Python_Net_Programming/exercises/tickets/persistentFileHandler.py", "file_name": "persistentFileHandler.py", "file_ext": "py", "file_size_in_byte": 891, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "649320902", "text": "import tensorrt as trt # Version TensorRT '5.1.5.0'\n'''\n\t\t\t\tTensorRT\t\t\t\tPytorch / TF / Keras / MXNet / Caffe2\narchitecture: \tsimplified network \t\tgeneral network for train/eval/test \nlang: \t\t\tC++ \t\t\t\t\tpython\ndtype: \t\t\tFP16 / int8 \t\t\tFP32\nAccuracy:\t\t79.x / 76.x\t\t\t\t80\n\n'''\nimport numpy as np\nfrom argparse import ArgumentParser\nimport os \nfrom utils import common\n\nimport cv2\n\nclass CFG(object):\n\t\"\"\"docstring for cfg\"\"\"\n\tdef __init__(self,args):\n\t\tself.onnx_file_path = args.model_path # gender_model.onnx\n\t\tself.model_name = self.onnx_file_path.split('.')[0].split('/')[-1]\n\t\tself.engine_file_path = args.model_path\n\t\tself.model_dtype = trt.float16\n\t\tself.input_shape = (3,224,224) # (c,h,w)\n\t\tself.TRT_LOGGER = trt.Logger(trt.Logger.WARNING)\n\t# def __init__(self,path):\n\t# \tself.model_name = 'age'\n\t# \tself.onnx_file_path = .onnx\n\t# \tself.engine_file_path = .trt\n\t# \tself.model_dtype = trt.float32\n\t# \tself.input_shape = (3,224,224) # (c,h,w)\n\t#\tself.TRT_LOGGER = trt.Logger(trt.Logger.WARNING)\n\ndef GiB(val):\n\treturn val * 1 << 30\n\ndef load_onnx_model1(args):\n\t# with builder = trt.Builder(TRT_LOGGER) 
as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n\t# \twith open(model_path, 'rb') as model:\n\t# \t\tparser.parse(model.read())\n\tapex = onnxparser.create_onnxconfig()\n\n\tapex.set_model_file_name(args.onnx_model_name)\n\tapex.set_model_dtype(args.model_dtype)\n\tapex.set_print_layer_info(True)\n\ttrt_parser = onnxparser.create_onnxparser(apex)\n\tdata_type = apex.get_model_dtype()\n\tonnx_filename = apex.get_model_file_name()\n\ttrt_parser.parse(onnx_filename, data_type)\n\n\ttrt_parser.convert_to_trtnetwork()\n\ttrt_network = trt_parser.get_trtnetwork()\n\treturn trt_network\n\ndef load_onnx_model2(args):\n\twith trt.Builder(args.TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, args.TRT_LOGGER) as parser:\n\t\twith open(args.onnx_model_name, 'rb') as model:\n\t\t\t# Set the model dtype to half , fp16\n\t\t\tparser.parse(model.read())\n\treturn builder.build_cuda_engine(network)\n\n\n\ndef get_engine(args,cfg):\n\t\"\"\"Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it.\"\"\"\n\tdef build_engine():\n\t\t\"\"\"Takes an ONNX file and creates a TensorRT engine to run inference with\"\"\"\n\t\twith trt.Builder(cfg.TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, cfg.TRT_LOGGER) as parser:\n\t\t\tbuilder.max_workspace_size = GiB(args.model_memory)\n\t\t\tbuilder.max_batch_size = args.max_batch_size\n\t\t\t\n\t\t\tif args.precision == 'fp16':\n\t\t\t\t# set to fp16 \n\t\t\t\tprint('force to fp16')\n\t\t\t\tbuilder.fp16_mode = True\n\t\t\t\tbuilder.strict_type_constraints = True\n\t\t\telif args.precision == 'int8':\n\t\t\t\t# set to int8\n\t\t\t\tbuilder.int8_mode = True\n\n\t\t\t\t'''\n\t\t\t\tNUM_IMAGES_PER_BATCH = 5 \n\t\t\t\tbatch = ImageBatchStream(NUM_IMAGES_PER_BATCH, calibration_files)\n\t\t\t\tInt8_calibration = EntropyCalibrator(['input_node_name'],batchstream)\n\t\t\t\ttrt_builder.int8_calibrator = Int8_calibrator\n\t\t\t\t'''\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t\t# Parse model file\n\t\t\tif not os.path.exists(cfg.onnx_file_path):\n\t\t\t\tprint('ONNX file {} not found, please run pytorch2ONNX.py first to generate it.'.format(cfg.onnx_file_path))\n\t\t\t\texit(0)\n\t\t\tprint('Loading ONNX file from path {}...'.format(cfg.onnx_file_path))\n\t\t\twith open(cfg.onnx_file_path, 'rb') as model:\n\t\t\t\tprint('Beginning ONNX file parsing')\n\t\t\t\tparser.parse(model.read())\n\t\t\tprint('Completed parsing of ONNX file')\n\t\t\tprint('Building an engine from file {}; this may take a while...'.format(cfg.onnx_file_path))\n\t\t\t\n\n\t\t\tprint(network.num_layers)\n\t\t\tnetwork.mark_output(network.get_layer(network.num_layers-1).get_output(0))\n\t\t\t\n\t\t\tengine = builder.build_cuda_engine(network)\n\t\t\tprint(\"Completed creating Engine\")\n\t\t\twith open(cfg.engine_file_path, \"wb\") as f:\n\t\t\t\tf.write(engine.serialize())\n\t\t\treturn engine\n\n\tif not args.build and os.path.exists(cfg.engine_file_path):\n\t\t# If a serialized engine exists, use it instead of building an engine.\n\t\tprint(\"Reading engine from file {}\".format(cfg.engine_file_path))\n\t\twith open(cfg.engine_file_path, \"rb\") as f, trt.Runtime(cfg.TRT_LOGGER) as runtime:\n\t\t\treturn runtime.deserialize_cuda_engine(f.read())\n\telse:\n\t\tprint('------------------ Building the Engine ------------------')\n\t\tprint(\"Building engine from file {}\".format(cfg.onnx_file_path))\n\t\treturn build_engine()\n\t#both are returning 
deserialize cuda engine\n\n\n# Run inference on device\ndef infer(context, input_img, output_size, batch_size): # legacy helper: assumes 'import pycuda.driver as cuda', which this script never does\n\t# Load engine\n\tengine = context.get_engine()\n\tassert(engine.get_nb_bindings() == 2)\n\t# Convert input data to Float32\n\tinput_img = input_img.astype(np.float32)\n\t# Create output array to receive data\n\toutput = np.empty(output_size, dtype = np.float32)\n \n\t# Allocate device memory\n\td_input = cuda.mem_alloc(batch_size * input_img.nbytes)\n\td_output = cuda.mem_alloc(batch_size * output.nbytes)\n \n\tbindings = [int(d_input), int(d_output)]\n \n\tstream = cuda.Stream()\n \n\t# Transfer input data to device\n\tcuda.memcpy_htod_async(d_input, input_img, stream)\n\t# Execute model\n\tcontext.enqueue(batch_size, bindings, stream.handle, None)\n\t# Transfer predictions back\n\tcuda.memcpy_dtoh_async(output, d_output, stream)\n \n\t# Return predictions\n\treturn output\n\t\n\ndef img_input():\n\treturn np.load('data/img.npy')\n\ndef parse_args(argv=None):\n\tparser = ArgumentParser()\n\tparser.add_argument('-p','--precision', default='fp16', type=str, dest='precision',\n\t\t\t\t\t\thelp='inference precision, fp32, fp16, int8 etc.')\n\tparser.add_argument('--model', type=str, dest='model_path',\n\t\t\t\t\t\thelp='model path')\n\tparser.add_argument('--model_memory', type=int, dest='model_memory',\n\t\t\t\t\t\thelp='engine memory')\n\tparser.add_argument('--model_max_batch_size', type=int, dest='max_batch_size',\n\t\t\t\t\t\thelp='engine batch')\n\t# parser.add_argument('-MP','--model_path', default='fp16', type=str, dest='engine_file_path',\n\t# \t\t\t\t\thelp='Path to the model')\n\tparser.add_argument('--build', action='store_true',dest='build',\n\t\t\t\t\t\thelp='build the model (the model will be overwritten if it exists)')\n\tparser.add_argument('--batch',default = 1, type=int ,dest='batch_size',\n\t\t\t\t\t\thelp='model batch size')\n\targs = parser.parse_args()\n\treturn args\n\ndef normalize(batch_img: np.array): # support \n\tbatch_img = np.true_divide(batch_img,255)\n\tmean = np.array([0.485, 0.456, 0.406]).reshape(1,3,1,1)\n\tstd = np.array([0.229, 0.224, 0.225]).reshape(1,3,1,1)\n\tbatch_img2 = np.subtract(batch_img,mean)\n\tbatch_img3 = np.true_divide(batch_img2,std)\n\treturn batch_img3 # fixed: return the mean/std-normalized batch, not the /255-only copy\n\ndef batch_resize( images :list): # [HWC, HWC, HWC, HWC]\n\tresize_shape = (512,320)\n\ttemp_images = np.array([cv2.resize(image, resize_shape) for image in images]) # cv2.INTER_LINEAR) NHWC\n\tbatch_image = np.transpose(temp_images,(0,3,1,2)) # RGB , NCHW\n\treturn batch_image # array(N,C,H,W)\n\ndef decode_segmap(image, nc=21):\n\n\t# with open('testing/colors.txt') as infile:\n\t# \tlabel_colors = [line.split('\\n')[0]for line in infile.readlines()]\n\t# \tlabel_colors = np.array([[int(x)for x in color.split(\" \")] for color in label_colors])\n\n\tlabel_colors = np.array([(0, 0, 0), # 0=background\n             # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle\n             (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),\n             # 6=bus, 7=car, 8=cat, 9=chair, 10=cow\n             (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),\n             # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person\n             (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),\n             # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor\n             (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])\n \n\tr = np.zeros_like(image).astype(np.uint8)\n\tg = np.zeros_like(image).astype(np.uint8)\n\tb = np.zeros_like(image).astype(np.uint8)\n\n\tfor l in range(0, nc):\n\t\tidx 
= image == l\n\t\tr[idx] = label_colors[l, 0]\n\t\tg[idx] = label_colors[l, 1]\n\t\tb[idx] = label_colors[l, 2]\n\t \n\trgb = np.stack([r, g, b], axis=2)\n\treturn rgb\n\ndef main():\n\targs = parse_args()\n\tcfg = CFG(args)\n\t'''\n\ttensorrt.DataType.FLOAT \ttensorrt.float32\n\ttensorrt.DataType.HALF \t\ttensorrt.float16\n\ttensorrt.DataType.INT32\t\ttensorrt.int32\n\ttensorrt.DataType.INT8 \t\ttensorrt.int8\n\t'''\n\n\t# assert os.path.exists(args.model_path)\n\n\toutput_shapes = (64,21,10,16)\n\n\tinput_img = cv2.imread('trump.jpg') # BGR , HWC \n\tori_shape = input_img.shape\n\tprint(ori_shape)\n\n\n\tinput_img = input_img[:,:,[2,1,0]] # BGR - RGB , HWC \n\n\n\t# bgr = input_img[:,:,::-1] # RGB - BGR , HWC \n\t# cv2.imwrite(\"testing/test2.jpg\",bgr)\n\n\n\tbatch_img = list(np.tile(input_img,[64,1,1,1]))\n\n\t# pre-processing\n\tprint(1,64,batch_img[0].shape)\n\tbatch_img = batch_resize(batch_img)\n\tprint(2,batch_img.shape)\n\tbatch_img = normalize(batch_img)\n\tprint(3,batch_img.shape)\n\n\t# TensorRT\n\tbatch_img = np.array(batch_img, dtype=np.float32, order='C')\n\twith get_engine(args, cfg) as engine, engine.create_execution_context() as context:\n\t\tinputs, outputs, bindings, stream = common.allocate_buffers(engine)\n\n\t\tinputs[0].host = batch_img\n\n\t\ttrt_outputs = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream, batch_size = args.batch_size)\n\n\t\tprint(trt_outputs)\n\n\ttrt_outputs = trt_outputs[0].reshape(output_shapes)\n\tnp.save('trt_outputs.npy',trt_outputs)\n\tprint(trt_outputs.shape)\n\trs = trt_outputs[0]\n\tprint(rs.shape)\n\n\n\n\n\t# om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()\n\n\tom = np.argmax(rs,axis = 0)\n\tprint(om.shape)\n\n\trgb = decode_segmap(om)\n\n\tbgr = rgb[:,:,::-1] # RGB - BGR\n\t# rgb = rgb[...,[2,0,1]] # RGB2BGR\n\t\n\tprint('rgb',bgr.shape)\n\tframe = cv2.resize(bgr, (ori_shape[0],ori_shape[1]), interpolation=cv2.INTER_LINEAR)\n\tframe = np.transpose(frame,(1,0,2)) # BGR , HWC\n\tcv2.imwrite(\"testing/test.jpg\",frame)\n\n\t# import matplotlib.pyplot as plt\n\t# plt.imshow(rgb); plt.show()\n\texit()\n\n\n\n\t# batch_img = np.ascontiguousarray(batch_img)\n\t# temp_img = temp_img.flatten()\n\n\t# get_engine(args,cfg)\n\n\t# print(trt_outputs)\n\t# Before doing post-processing, we need to reshape the outputs as the common.do_inference will give us flat arrays.\n\t# print(trt_outputs.shape)\n\t# for trt_output in trt_outputs:\n\t# \tprint(trt_output)\n\n\n\n\n\n\t# om = np.argmax(trt_outputs)\n\n\t# with open('testing/colors.txt') as infile:\n\t# \tclasses = [line.split('\\n')[0]for line in infile.readlines()]\n\t# \tclasses = np.array([[int(x)for x in shape.split(\" \")] for shape in classes])\n\t# print(classes.shape)\n\n\tfor idx, _class in enumerate(classes):\n\n\n\t\t'''\n\t\tprint(idx, _class)\n\t\t# frame = np.array([np.ones((10,16))* RGB for RGB in _class])\n\t\t# print(trt_outputs[idx])\n\t\tframe = np.multiply(trt_outputs[idx],_class.reshape(3,1,1)) # RGB , CHW\n\t\t\n\t\tprint(frame.shape)\n\t\tprint(frame)\n\t\t# frame = np.dot(frame,trt_outputs[0][idx])\n\t\t# print(frame)\n\t# for idx,value in enumerate(trt_outputs[0]):\n\t\tframe = np.transpose(frame,(1,2,0)) # RGB , HWC\n\t\tprint(frame.shape, ori_shape)\n\t\tframe = cv2.resize(frame, (ori_shape[0],ori_shape[1]), interpolation=cv2.INTER_LINEAR)\n\n\t\t# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\t\tframe = frame[...,[2,0,1]]\n\n\t\t# normalise\n\t\tframe *= (255.0/frame.max())\n\n\t\tprint(frame)\n\t\t# 
cv2.imwrite(\"testing/layer_{}.jpg\".format(idx),frame)\n\t\t'''\n\t\ttemp = cv2.resize(trt_outputs[idx], (ori_shape[1],ori_shape[0]), interpolation=cv2.INTER_LINEAR)\n\t\t# temp += 100\n\t\t# print(temp.max(),temp.min())\n\t\t# cv2.imwrite(\"testing/layer_{}.jpg\".format(idx),temp)\n\t# cv2.imwrite(\"testing/test.jpg\",input_img[0])\n\n\t# trt_outputs = [output.reshape(shape) for output, shape in zip(trt_outputs, output_shapes)]\n\n\n\n\t# print(load_onnx_model2)\n\n\t'''\n\t# Template\n\tTRT_LOGGER = trt.Logger(trt.Logger.WARNING)\n\tbuilder = trt.Builder(TRT_LOGGER)\n\n\tnetwork = builder.create_network()\n\tdataLayer = network.add_input('data',trt.DataType.FLOAT,(c,h,w))\n\t# Add network layer\n\tnetwork.mark_output(outputLayer.get_output(0))\n\n\tengine = builder.build_cuda_engine(network)\n\tcontext = engine.create_execution_context()\n\tcontext.execute_async(bindings=[d_input,d_output])\n\t'''\n\n\n\t'''\n\tTRT_LOGGER = trt.Logger(trt.Logger.WARNING)\n\twith trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network:\n\t\tbuilder.max_workspace_size = GiB(2)\n\n\t\tdataLayer = network.add_input('data',args.model_dtype,args.input_shape)\n\n\t\tnetwork.mark_output(outputLayer.get_output(0))\n\n\t\treturn builder.build_cuda_engine(network)\n\tcontext = engine.create_execution_context()\n\tcontext.execute_async(bindings=[d_input,d_output])\n\tmodelstream = engine.serialize()\n\ttrt.utils.write_engine_to_file(args.trt_model_name, modelstream)\n\tengine.destroy()\n\tbuilder.destroy()\n\t'''\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n# docker run --privileged --rm -it -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$DISPLAY -v ~/Desktop/python:/py -w /py --runtime=nvidia nvcr.io/nvidia/tensorrt:19.09-py3 bash\n# cd TensorRT_Deployment/ && pip3 install opencv-python matplotlib && apt-get install -y libsm6 libxext6 libxrender1\n# python3 convert_trt_val.py --model resnet18.trt\n", "sub_path": "resnet18_mhp_onnx2trt.py", "file_name": "resnet18_mhp_onnx2trt.py", "file_ext": "py", "file_size_in_byte": 12960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorrt.float16", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorrt.Logger", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorrt.Builder", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorrt.OnnxParser", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorrt.Builder", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorrt.OnnxParser", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorrt.Runtime", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 153, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.true_divide", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 176, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.true_divide", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 204, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 214, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 253, "usage_type": "attribute"}, {"api_name": "utils.common.allocate_buffers", "line_number": 255, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 255, "usage_type": "name"}, {"api_name": "utils.common.do_inference", "line_number": 259, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 259, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 274, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 283, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 283, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 284, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 285, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 342, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 342, "usage_type": "attribute"}]} +{"seq_id": "576788703", "text": "# Copyright 2012 Cloudscaling Group, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport webob\n\nfrom nova import db\nfrom nova.api.gce.views import operations as operations_view\nfrom nova.api.gce.views import disks as disks_view\nfrom nova.api.openstack import wsgi as openstack_wsgi\nfrom nova.api.gce import wsgi as gce_wsgi\nfrom nova import exception\nfrom nova import volume\n\n\nclass Controller(openstack_wsgi.Controller):\n\n _view_builder_class = disks_view.ViewBuilder\n\n def __init__(self, **kwargs):\n super(Controller, self).__init__(**kwargs)\n self._volume_api = volume.API()\n self._operations_view = operations_view.ViewBuilder()\n\n def index(self, req):\n volumes = self._get_volumes(req)\n return self._view_builder.index(req, volumes)\n\n def show(self, req, id):\n context = req.environ['nova.context']\n\n try:\n volume = self._volume_api.get(context, id)\n return self._view_builder.basic(req, volume)\n except exception.NotFound:\n raise webob.exc.HTTPNotFound()\n\n def delete(self, req, id):\n context = req.environ['nova.context']\n\n try:\n volume = self._volume_api.get(context, id)\n self._volume_api.delete(context, volume)\n except exception.NotFound:\n raise webob.exc.HTTPNotFound()\n\n target_link = self._view_builder._get_links(req,\n volume['id'],\n self._view_builder._collection_name)\n operation_type = \"delete\"\n\n return operations_view.generate_operation(req,\n target_link, operation_type)\n\n def create(self, req, body):\n context = req.environ['nova.context']\n\n new_volume = self._volume_api.create(context,\n int(body['sizeGb']),\n body.get('name'),\n body.get('description'))\n\n target_link = self._view_builder._get_links(req,\n new_volume[\"id\"],\n self._view_builder._collection_name)\n operation_type = \"insert\"\n\n return operations_view.generate_operation(req,\n target_link, operation_type)\n\n def _get_volumes(self, req):\n context = req.environ['nova.context']\n\n volumes = self._volume_api.get_all(context)\n\n return volumes\n\n\ndef create_resource():\n return gce_wsgi.GCEResource(Controller())\n", "sub_path": "nova/api/gce/disks.py", "file_name": "disks.py", "file_ext": "py", "file_size_in_byte": 2920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nova.api.openstack.wsgi.Controller", "line_number": 27, "usage_type": "attribute"}, {"api_name": "nova.api.openstack.wsgi", "line_number": 27, "usage_type": "name"}, {"api_name": "nova.api.gce.views.disks.ViewBuilder", "line_number": 29, "usage_type": "attribute"}, {"api_name": "nova.api.gce.views.disks", "line_number": 29, "usage_type": "name"}, {"api_name": "nova.volume.API", "line_number": 33, "usage_type": "call"}, {"api_name": "nova.volume", "line_number": 33, "usage_type": "name"}, {"api_name": "nova.api.gce.views.operations.ViewBuilder", "line_number": 34, "usage_type": "call"}, {"api_name": "nova.api.gce.views.operations", "line_number": 34, "usage_type": "name"}, {"api_name": "nova.volume", "line_number": 44, "usage_type": "name"}, {"api_name": "nova.volume", "line_number": 45, "usage_type": "argument"}, {"api_name": "nova.exception.NotFound", "line_number": 46, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 46, "usage_type": "name"}, {"api_name": "webob.exc.HTTPNotFound", "line_number": 47, "usage_type": "call"}, {"api_name": "webob.exc", "line_number": 47, "usage_type": "attribute"}, {"api_name": "nova.volume", "line_number": 53, "usage_type": 
"name"}, {"api_name": "nova.volume", "line_number": 54, "usage_type": "argument"}, {"api_name": "nova.exception.NotFound", "line_number": 55, "usage_type": "attribute"}, {"api_name": "nova.exception", "line_number": 55, "usage_type": "name"}, {"api_name": "webob.exc.HTTPNotFound", "line_number": 56, "usage_type": "call"}, {"api_name": "webob.exc", "line_number": 56, "usage_type": "attribute"}, {"api_name": "nova.volume", "line_number": 59, "usage_type": "name"}, {"api_name": "nova.api.gce.views.operations.generate_operation", "line_number": 63, "usage_type": "call"}, {"api_name": "nova.api.gce.views.operations", "line_number": 63, "usage_type": "name"}, {"api_name": "nova.api.gce.views.operations.generate_operation", "line_number": 79, "usage_type": "call"}, {"api_name": "nova.api.gce.views.operations", "line_number": 79, "usage_type": "name"}, {"api_name": "nova.api.gce.wsgi.GCEResource", "line_number": 91, "usage_type": "call"}, {"api_name": "nova.api.gce.wsgi", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "251540231", "text": "# coding: utf8\nfrom __future__ import unicode_literals\n\nfrom django import forms\nfrom django.forms.models import modelform_factory\n\nfrom rpg.models import Role, RoleConnection\n\n\nclass RequestForm(forms.Form):\n role = forms.IntegerField(label='Роль', widget=forms.Select)\n\n def __init__(self, *args, **kwargs):\n super(RequestForm, self).__init__(*args, **kwargs)\n\n self.fields['role'].widget.choices = [\n (role.id, unicode(role))\n for role in Role.objects.filter(user__isnull=True)\n ]\n\n def clean_role(self):\n try:\n return Role.objects.get(pk=self.cleaned_data['role'], user__isnull=True)\n except Role.DoesNotExist:\n raise forms.ValidationError('Неизвестная роль')\n\n def save(self, user):\n self.cleaned_data['role'].user = user\n self.cleaned_data['role'].save()\n\n user.role = self.cleaned_data['role']\n user.save()\n\nRoleForm = modelform_factory(Role, exclude=('user',))\nConnectionFormSet = forms.inlineformset_factory(Role, RoleConnection, fk_name='role', exclude=('is_locked',), extra=1)\n", "sub_path": "src/rpg/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1126, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.Form", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rpg.models.Role.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "rpg.models.Role.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rpg.models.Role", "line_number": 18, "usage_type": "name"}, {"api_name": "rpg.models.Role.objects.get", "line_number": 23, "usage_type": "call"}, {"api_name": "rpg.models.Role.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rpg.models.Role", "line_number": 23, "usage_type": "name"}, {"api_name": "rpg.models.Role.DoesNotExist", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rpg.models.Role", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": 
"django.forms.models.modelform_factory", "line_number": 34, "usage_type": "call"}, {"api_name": "rpg.models.Role", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.forms.inlineformset_factory", "line_number": 35, "usage_type": "call"}, {"api_name": "rpg.models.Role", "line_number": 35, "usage_type": "argument"}, {"api_name": "rpg.models.RoleConnection", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "10475202", "text": "from collections import deque\nN = int(input())\nA = list(map(int, input().split()))\n\nL = [(10**18, 10**18, 10**18)] * (N + 1)\nleft = 0\nright = 0\nque = deque([])\nfor i, a in enumerate(A, start=1):\n right += a\n que.append(a)\n while len(que) > 1 and abs(right - left) > abs((right - que[0]) - (left + que[0])):\n q = que.popleft()\n left += q\n right -= q\n L[i] = (abs(left - right), left, right)\n\nR = [(10**18, 10**18, 10**18)] * (N + 1)\nleft = 0\nright = 0\nque = deque([])\nfor i, a in enumerate(A[::-1], start=1):\n right += a\n que.append(a)\n while len(que) > 1 and abs(right - left) > abs((right - que[0]) - (left + que[0])):\n q = que.popleft()\n left += q\n right -= q\n R[i] = (abs(left - right), left, right)\n\nR = R[::-1]\n\nans = 10**18\nfor mid in range(1, N):\n X = [L[mid + 1][1], L[mid + 1][2], R[mid + 1][1], R[mid + 1][2]]\n if min(X) == 0:\n continue\n ans = min(ans, max(X) - min(X))\nprint(ans)\n", "sub_path": "AtCoder/arc/100d.py", "file_name": "100d.py", "file_ext": "py", "file_size_in_byte": 973, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "568465613", "text": "from django.db import models\n\nimport uuid\n\nfrom accounts.models import Patient\n\n# Create your models here.\n\n\nclass Parameter(models.Model):\n code = models.UUIDField(verbose_name='ParameterCode', primary_key=True, default=uuid.uuid4, editable=False)\n name = models.CharField(max_length=25, verbose_name='ParameterName', null=False)\n unity_measure = models.CharField(max_length=10, verbose_name='UnityMeasure')\n patient = models.ForeignKey(Patient, models.CASCADE)\n\n\nclass Measure(models.Model):\n code = models.UUIDField(verbose_name='MeasureCode', primary_key=True, default=uuid.uuid4, editable=False)\n value = models.FloatField(verbose_name='Value', null=False)\n date = models.DateField(verbose_name='Date', null=False)\n hour = models.CharField(verbose_name='Hour', max_length=25, null=False)\n parameter = models.ForeignKey(Parameter, models.CASCADE)\n", "sub_path": "measures/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", 
"line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 14, "usage_type": "call"}, {"api_name": "accounts.models.Patient", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models.FloatField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "336699825", "text": "#import sys\n#sys.stdin = open(\"sample_input.txt\", \"r\")\nread = lambda :input()\nfrom collections import deque\n\ntest_cases = int(read())\n\nfor test_case in range(1, test_cases+1):\n N ,K = map(int, read().split())\n sutza = [a for a in read()]\n for i in range(len(sutza)) :\n if sutza[i] == 'A':\n sutza[i] = 10\n elif sutza[i] == 'B':\n sutza[i] = 11\n elif sutza[i] == 'C':\n sutza[i] = 12\n elif sutza[i] == 'D':\n sutza[i] = 13\n elif sutza[i] == 'E':\n sutza[i] = 14\n elif sutza[i] == 'F':\n sutza[i] = 15\n else:\n sutza[i] = int(sutza[i])\n\n numbers = []\n queue = deque(sutza)\n\n # without rotation\n for i in range(0, len(queue), N//4):\n k = N // 4 - 1\n sum = 0\n for j in range(i, i+N//4):\n sum += (16**k) * queue[j]\n k -= 1\n if sum not in numbers:\n numbers.append(sum)\n\n # with rotation :\n for _ in range(N//4-1):\n a = queue.pop()\n queue.appendleft(a)\n for i in range(0, len(queue), N // 4):\n k = N // 4 - 1\n sum = 0\n for j in range(i, i + N // 4):\n sum += (16 ** k) * queue[j]\n k -= 1\n if sum not in numbers:\n numbers.append(sum)\n numbers.sort(reverse=True)\n ans = numbers[K-1]\n print('#{} {}'.format(test_case, ans))\n", "sub_path": "Baekjoon,SWEA, etc/SWEA/SWEA5658.py", "file_name": "SWEA5658.py", "file_ext": "py", "file_size_in_byte": 1439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "559533970", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 20 01:33:20 2018\n\n@author: aldorobles\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport pickle\nfrom sklearn.metrics import mean_squared_error,r2_score\nimport matplotlib.colors as colors\nfrom plotly.offline import plot\nimport plotly.graph_objs as go\n\n# Restore working 
directory function after saving plots\n## String which contains absolute path to the script file\nabspath = os.path.abspath('')\n\nif \"/Users/aldorobles\" in abspath:\n workingpath = '/Users/aldorobles/Documents/GitHub/Glucose-Insulin-MIMIC-III'\n savepath = '/Users/aldorobles/Documents/InsulinResponsePlots'\n print(\"Macbook directory\")\n \nif \"Users\\Aldo\" in abspath:\n workingpath = 'C:\\\\Users\\\\Aldo\\\\Google Drive\\\\MIT-Portugal\\\\PhD Work\\\\InsulinResponse\\\\MIMIC3'\n savepath = 'C:\\\\Users\\\\Aldo\\\\Documents\\\\PlotsInsulinResponse'\n print(\"PC directory\")\n \nif \"/Users/aldo/\" in abspath:\n workingpath = '/Users/aldo/Documents/GitHub/Glucose-Insulin-MIMIC-III'\n savepath = '/Users/aldo/Documents/PlotsInsulinResponse'\n print(\"iMac directory\")\n\nos.chdir(workingpath)\n\nimport BQueries as BQU\n\n# Insert your BigQuery Project ID Here\n# Can be found in the Google web console\nprojectid = \"hst-953-2018\" \n\ndef pltsave(indicator):\n os.chdir(indicator)\n return print(os.getcwd())\n\n#Rounding (for heatmap categories)\ndef myround(x, base):\n return int(base * round(float(x)/base))\n\n\n#%% INSULIN EVENTS\n # For the sensitivity analysis\n # WHEN itemid=223257 THEN 'Intermediate' --'Ins7030'\n # WHEN itemid=223258 THEN 'Short' --'InsRegular'\n # WHEN itemid=223259 THEN 'Intermediate' --'InsNPH'\n # WHEN itemid=223260 THEN 'Long' --'InsGlargine'\n # WHEN itemid=223261 THEN 'Intermediate' --'InsHum7525'\n # WHEN itemid=223262 THEN 'Short' --'InsHum'\n\n# Change directory\npltsave(savepath)\n\n# Get admissions\ninsulin_mv = BQU.query_Insulin()\n\n\n# Restore directory\npltsave(workingpath)\n\n# Convert dtypes\ninsulin_mv[[\"ICUSTAY_ID\",\"INFXSTOP\"]] = insulin_mv[\n [\"ICUSTAY_ID\",\"INFXSTOP\"]].apply(pd.to_numeric\n , errors='coerce')\n \ninsulin_mv.to_csv(\"InsulinInputsRAW.csv\", index=False, encoding='utf8')\n\nstats_step1_Insulin = insulin_mv.describe()\npatients_step1_sub_Insulin = insulin_mv['SUBJECT_ID'].nunique()\npatients_step1_hadm_Insulin = insulin_mv['HADM_ID'].nunique()\npatients_step1_icu_Insulin = insulin_mv['ICUSTAY_ID'].nunique()\nvalue_step1_Insulin_item = insulin_mv['ITEMID'].value_counts()\nvalue_step1_Insulin_Admin = insulin_mv['InsulinAdmin'].value_counts()\n\ninsulin_mv['CHART_STORE'] = insulin_mv['STARTTIME'] > insulin_mv['STORETIME']\n\ninsulin_mv = insulin_mv.dropna(subset=[\"ICUSTAY_ID\"])\n# Drop missing values on ICUSTAY (Aprox 24 cases)\nstats_step1_InsulinDropNa = insulin_mv.describe()\n\n # Step 2\n # Some rates are null and replaced by original rate instead\n # , (CASE WHEN ordercategoryname IN ('01-Drips','12-Parenteral Nutrition')\n # AND RATE IS null\n # THEN ORIGINALRATE ELSE RATE END) AS RATE\nMV_infusions = insulin_mv[(insulin_mv['InsulinType']==\"Short\") & \n (insulin_mv['InsulinAdmin'].str.contains('INFUSION'))]\n\nstats_step2_infxn = MV_infusions.describe()\npatients_step2_sub_infxn = MV_infusions['SUBJECT_ID'].nunique()\npatients_step2_hadm_infxn = MV_infusions['HADM_ID'].nunique()\npatients_step2_icu_infxn = MV_infusions['ICUSTAY_ID'].nunique()\n\nMV_infusion_notnull = MV_infusions.dropna(subset=['RATE'])\ndf=MV_infusion_notnull[(MV_infusion_notnull.ORIGINALRATE < 2000)]\n \n # JointPlot\ng = sns.jointplot(\"RATE\", \"ORIGINALRATE\", data=df, kind=\"reg\"\n , color=\"m\", height=7)\nplt.xlabel('Rate (U/hr)', fontsize=18)\nplt.ylabel('Set up rate (U/hr)', fontsize = 18)\nax = plt.gca()\nax.tick_params(labelsize = 16)\nplt.savefig(os.path.join(savepath,'JointPlotRate_OriginalRate.png')\n, 
bbox_inches='tight')\n\nstep2_infxn_r2 = r2_score(df.RATE, df.ORIGINALRATE)\nstep2_infxn_MSE = mean_squared_error(df.RATE, df.ORIGINALRATE)\n\ndel df\n\nMV_infusions['RATE'].fillna(MV_infusions['ORIGINALRATE'], inplace=True)\n\n # Step 3\n # Remove rates <= 0 U/hr\nMV_infusions = MV_infusions[MV_infusions['RATE'] > 0]\n\nstats_step3_infxn = MV_infusions.describe()\npatients_step3_sub_infxn = MV_infusions['SUBJECT_ID'].nunique()\npatients_step3_hadm_infxn = MV_infusions['HADM_ID'].nunique()\npatients_step3_icu_infxn = MV_infusions['ICUSTAY_ID'].nunique()\nvalue_step3_infxn_item = MV_infusions['ITEMID'].value_counts()\nP99_infxn = MV_infusions['RATE'].quantile(.99)\n\n # Step 4\n # Drop infusions over 99th percentile\nMV_infusions = MV_infusions[\n (MV_infusions['RATE'] < (MV_infusions['RATE'].quantile(.99)))]\n\nstats_step4_infxn = MV_infusions.describe()\npatients_step4_sub_infxn = MV_infusions['SUBJECT_ID'].nunique()\npatients_step4_hadm_infxn = MV_infusions['HADM_ID'].nunique()\npatients_step4_icu_infxn = MV_infusions['ICUSTAY_ID'].nunique()\nvalue_step4_infxn_item = MV_infusions['ITEMID'].value_counts()\n\n # Step 5\n # Find: (CASE WHEN pt.starttime < pt.storetime THEN pt.charttime ELSE pt.storetime END) AS timer_\nstep5_switch_infxn = MV_infusions[(MV_infusions.CHART_STORE == True)]\nstep5_switch_infxn['TIMEDELTA'] = abs(step5_switch_infxn.STARTTIME \n - step5_switch_infxn.STORETIME).dt.seconds\n\nstats_step5_infxn = step5_switch_infxn.describe()\npatients_step5_sub_infxn = step5_switch_infxn['SUBJECT_ID'].nunique()\npatients_step5_hadm_infxn = step5_switch_infxn['HADM_ID'].nunique()\npatients_step5_icu_infxn = step5_switch_infxn['ICUSTAY_ID'].nunique()\n\n # Scatterplottimedelta\nplt.figure(figsize=(12,10))\ndf = step5_switch_infxn.loc[step5_switch_infxn['CHART_STORE']==True]\ndf['TIMEDELTA']=df['TIMEDELTA']/60\nsns.scatterplot(x=\"TIMEDELTA\",y=\"RATE\",data=df,alpha=0.3, color=\"m\")\nplt.title('Time difference (STARTTIME - STORETIME) n='+str(df.shape[0])\n ,fontsize=18)\nplt.vlines(df['TIMEDELTA'].quantile(0.50), 0, max(df.RATE)\n , color = 'red', linestyles='--'\n , label = 'Median ('+str(int(df['TIMEDELTA'].quantile(0.5)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.95), 0, max(df.RATE)\n , color = '#34A853', linestyles='--'\n ,label='$P$ = 95$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.95)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.99), 0, max(df.RATE)\n, color = '#9CBF38', linestyles='--'\n, label='$P$ = 99$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.99)))+' min)')\n#plt.vlines(90, 0, max(df.RATE), color = '#E7BE2D', linestyles='dotted'\n# ,label='90 min')\nplt.xlabel('$\\Delta$ time (min)', fontsize=18)\nplt.ylabel('Infusion rate (U/hr)', fontsize=18)\nplt.ylim(0,(max(df.RATE)))\nplt.xlim(0,(max(df.TIMEDELTA)+5))\nax = plt.gca()\nax.tick_params(axis = 'both', which = 'major', labelsize = 18)\nax.legend(fontsize=18) \nplt.savefig(os.path.join(savepath,'ScatSTART-STORE_infxn.png'), bbox_inches='tight')\n\ndel df\n\n # Step 2\n # Short acting insulin boluses\nMV_bol_short = insulin_mv[(insulin_mv['InsulinType']==\"Short\") & \n (insulin_mv['InsulinAdmin'].str.contains('BOLUS'))]\n\nstats_step2_bol_s = MV_bol_short.describe()\npatients_step2_sub_bol_s = MV_bol_short['SUBJECT_ID'].nunique()\npatients_step2_hadm_bol_s = MV_bol_short['HADM_ID'].nunique()\npatients_step2_icu_bol_s = MV_bol_short['ICUSTAY_ID'].nunique()\nvalue_step2_bolus_s_item = MV_bol_short['ITEMID'].value_counts()\n\n # Step 3\n # Remove doses <= 0 U\nMV_bol_short = MV_bol_short[MV_bol_short['AMOUNT'] 
> 0]\n\nstats_step3_bol_s = MV_bol_short.describe()\npatients_step3_sub_bol_s = MV_bol_short['SUBJECT_ID'].nunique()\npatients_step3_hadm_bol_s = MV_bol_short['HADM_ID'].nunique()\npatients_step3_icu_bol_s = MV_bol_short['ICUSTAY_ID'].nunique()\nvalue_step3_bol_s_item = MV_bol_short['ITEMID'].value_counts()\nP99_bol_s = MV_bol_short['AMOUNT'].quantile(.99)\n\n # Step 4\n # Drop boluses over 99th percentile\nMV_bol_short = MV_bol_short[\n (MV_bol_short['AMOUNT'] < (MV_bol_short['AMOUNT'].quantile(.99)))]\n\nstats_step4_bol_s = MV_bol_short.describe()\npatients_step4_sub_bol_s = MV_bol_short['SUBJECT_ID'].nunique()\npatients_step4_hadm_bol_s = MV_bol_short['HADM_ID'].nunique()\npatients_step4_icu_bol_s = MV_bol_short['ICUSTAY_ID'].nunique()\nvalue_step4_bol_s_item = MV_bol_short['ITEMID'].value_counts()\n\n # Step 5\n # Find: (CASE WHEN pt.starttime < pt.storetime THEN pt.charttime ELSE pt.storetime END) AS timer_\nstep5_switch_bol_s = MV_bol_short[(MV_bol_short.CHART_STORE == True)]\nstep5_switch_bol_s['TIMEDELTA'] = abs(step5_switch_bol_s.STARTTIME \n - step5_switch_bol_s.STORETIME).dt.seconds\n\nstats_step5_bol_s = step5_switch_bol_s.describe()\npatients_step5_sub_bol_s = step5_switch_bol_s['SUBJECT_ID'].nunique()\npatients_step5_hadm_bol_s = step5_switch_bol_s['HADM_ID'].nunique()\npatients_step5_icu_bol_s = step5_switch_bol_s['ICUSTAY_ID'].nunique()\n\n # Scatterplottimedelta\nplt.figure(figsize=(12,10))\ndf = step5_switch_bol_s.loc[step5_switch_bol_s['CHART_STORE']==True]\ndf['TIMEDELTA']=df['TIMEDELTA']/60\nsns.scatterplot(x=\"TIMEDELTA\",y=\"AMOUNT\",data=df,alpha=0.3, color=\"m\")\nplt.title('Time difference (STARTTIME - STORETIME) n='+str(df.shape[0])\n ,fontsize=18)\nplt.vlines(df['TIMEDELTA'].quantile(0.50), 0, max(df.AMOUNT)\n , color = 'red', linestyles='--'\n , label = 'Median ('+str(int(df['TIMEDELTA'].quantile(0.5)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.95), 0, max(df.AMOUNT)\n , color = '#34A853', linestyles='--'\n ,label='$P$ = 95$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.95)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.99), 0, max(df.AMOUNT)\n, color = '#9CBF38', linestyles='--'\n, label='$P$ = 99$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.99)))+' min)')\nplt.xlabel('$\\Delta$ time (min)', fontsize=18)\nplt.ylabel('Bolus dose (U)', fontsize=18)\nplt.ylim(0,(max(df.AMOUNT)))\nplt.xlim(0,(max(df.TIMEDELTA)+5))\nax = plt.gca()\nax.tick_params(axis = 'both', which = 'major', labelsize = 18)\nax.legend(fontsize=18) \nplt.savefig(os.path.join(savepath,'ScatSTART-STORE_Bol_S.png')\n, bbox_inches='tight')\n\ndel df\n \n # Step 2\n # Intermediate acting insulin boluses\nMV_bol_inter = insulin_mv[(insulin_mv['InsulinType']==\"Intermediate\") & \n (insulin_mv['InsulinAdmin'].str.contains('BOLUS'))]\n\nstats_step2_bol_i = MV_bol_inter.describe()\npatients_step2_sub_bol_i = MV_bol_inter['SUBJECT_ID'].nunique()\npatients_step2_hadm_bol_i = MV_bol_inter['HADM_ID'].nunique()\npatients_step2_icu_bol_i = MV_bol_inter['ICUSTAY_ID'].nunique()\nvalue_step2_bolus_i_item = MV_bol_inter['ITEMID'].value_counts()\n\n # Step 3\n # Remove doses <= 0 U\nMV_bol_inter = MV_bol_inter[MV_bol_inter['AMOUNT'] > 0]\n\nstats_step3_bol_i = MV_bol_inter.describe()\npatients_step3_sub_bol_i = MV_bol_inter['SUBJECT_ID'].nunique()\npatients_step3_hadm_bol_i = MV_bol_inter['HADM_ID'].nunique()\npatients_step3_icu_bol_i = MV_bol_inter['ICUSTAY_ID'].nunique()\nvalue_step3_bol_i_item = MV_bol_inter['ITEMID'].value_counts()\nP99_bol_i = MV_bol_inter['AMOUNT'].quantile(.99)\n\n # Step 4\n # Drop 
boluses over 99th percentile (NOT CONSIDERED FOR INTERMEDIATE)\n \n # Step 5\n # Find: (CASE WHEN pt.starttime < pt.storetime THEN pt.charttime ELSE pt.storetime END) AS timer_\nstep5_switch_bol_i = MV_bol_inter[(MV_bol_inter.CHART_STORE == True)]\nstep5_switch_bol_i['TIMEDELTA'] = abs(step5_switch_bol_i.STARTTIME \n - step5_switch_bol_i.STORETIME).dt.seconds\n\nstats_step5_bol_i = step5_switch_bol_i.describe()\npatients_step5_sub_bol_i = step5_switch_bol_i['SUBJECT_ID'].nunique()\npatients_step5_hadm_bol_i = step5_switch_bol_i['HADM_ID'].nunique()\npatients_step5_icu_bol_i = step5_switch_bol_i['ICUSTAY_ID'].nunique()\n\n # Scatterplottimedelta\nplt.figure(figsize=(12,10))\ndf = step5_switch_bol_i.loc[step5_switch_bol_i['CHART_STORE']==True]\ndf['TIMEDELTA']=df['TIMEDELTA']/60\nsns.scatterplot(x=\"TIMEDELTA\",y=\"AMOUNT\",data=df,alpha=0.3, color=\"m\")\nplt.title('Time difference (STARTTIME - STORETIME) n='+str(df.shape[0])\n ,fontsize=18)\nplt.vlines(df['TIMEDELTA'].quantile(0.50), 0, max(df.AMOUNT)\n , color = 'red', linestyles='--'\n , label = 'Median ('+str(int(df['TIMEDELTA'].quantile(0.5)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.95), 0, max(df.AMOUNT)\n , color = '#34A853', linestyles='--'\n ,label='$P$ = 95$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.95)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.99), 0, max(df.AMOUNT)\n, color = '#9CBF38', linestyles='--'\n, label='$P$ = 99$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.99)))+' min)')\nplt.xlabel('$\\Delta$ time (min)', fontsize=18)\nplt.ylabel('Bolus dose (U)', fontsize=18)\nplt.ylim(0,(max(df.AMOUNT)))\nplt.xlim(0,(max(df.TIMEDELTA)+5))\nax = plt.gca()\nax.tick_params(axis = 'both', which = 'major', labelsize = 18)\nax.legend(fontsize=18) \nplt.savefig(os.path.join(savepath,'ScatSTART-STORE_Bol_i.png')\n, bbox_inches='tight')\n\ndel df\n\n # Step 2\n # Long acting insulin boluses\nMV_bol_long = insulin_mv[(insulin_mv['InsulinType']==\"Long\") & \n (insulin_mv['InsulinAdmin'].str.contains('BOLUS'))]\n\nstats_step2_bol_L = MV_bol_long.describe()\npatients_step2_sub_bol_L = MV_bol_long['SUBJECT_ID'].nunique()\npatients_step2_hadm_bol_L = MV_bol_long['HADM_ID'].nunique()\npatients_step2_icu_bol_L = MV_bol_long['ICUSTAY_ID'].nunique()\nvalue_step2_bolus_L_item = MV_bol_long['ITEMID'].value_counts()\n\n # Step 3\n # Remove doses <= 0 U\nMV_bol_long = MV_bol_long[MV_bol_long['AMOUNT'] > 0]\n\nstats_step3_bol_L = MV_bol_long.describe()\npatients_step3_sub_bol_L = MV_bol_long['SUBJECT_ID'].nunique()\npatients_step3_hadm_bol_L = MV_bol_long['HADM_ID'].nunique()\npatients_step3_icu_bol_L = MV_bol_long['ICUSTAY_ID'].nunique()\nvalue_step3_bol_L_item = MV_bol_long['ITEMID'].value_counts()\nP99_bol_L = MV_bol_long['AMOUNT'].quantile(.99)\n\n # Step 4\n # Drop boluses over 99th percentile (NOT CONSIDERED FOR LONG ACTING)\n\n # Step 5\n # Find: (CASE WHEN pt.starttime < pt.storetime THEN pt.charttime ELSE pt.storetime END) AS timer_\nstep5_switch_bol_L = MV_bol_long[(MV_bol_long.CHART_STORE == True)]\nstep5_switch_bol_L['TIMEDELTA'] = abs(step5_switch_bol_L.STARTTIME \n - step5_switch_bol_L.STORETIME).dt.seconds\n\nstats_step5_bol_L = step5_switch_bol_L.describe()\npatients_step5_sub_bol_L = step5_switch_bol_L['SUBJECT_ID'].nunique()\npatients_step5_hadm_bol_L = step5_switch_bol_L['HADM_ID'].nunique()\npatients_step5_icu_bol_L = step5_switch_bol_L['ICUSTAY_ID'].nunique()\n\n # Scatterplottimedelta\nplt.figure(figsize=(12,10))\ndf = 
step5_switch_bol_L.loc[step5_switch_bol_L['CHART_STORE']==True]\ndf['TIMEDELTA']=df['TIMEDELTA']/60\nsns.scatterplot(x=\"TIMEDELTA\",y=\"AMOUNT\",data=df,alpha=0.3, color=\"m\")\nplt.title('Time difference (STARTTIME - STORETIME) n='+str(df.shape[0])\n ,fontsize=18)\nplt.vlines(df['TIMEDELTA'].quantile(0.50), 0, max(df.AMOUNT)\n , color = 'red', linestyles='--'\n , label = 'Median ('+str(int(df['TIMEDELTA'].quantile(0.5)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.95), 0, max(df.AMOUNT)\n , color = '#34A853', linestyles='--'\n ,label='$P$ = 95$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.95)))+' min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.99), 0, max(df.AMOUNT)\n, color = '#9CBF38', linestyles='--'\n, label='$P$ = 99$^{TH}$ ('+str(int(df['TIMEDELTA'].quantile(0.99)))+' min)')\nplt.xlabel('$\\Delta$ time (min)', fontsize=18)\nplt.ylabel('Bolus dose (U)', fontsize=18)\nplt.ylim(0,(max(df.AMOUNT)))\nplt.xlim(0,(max(df.TIMEDELTA)+5))\nax = plt.gca()\nax.tick_params(axis = 'both', which = 'major', labelsize = 18)\nax.legend(fontsize=18) \nplt.savefig(os.path.join(savepath,'ScatSTART-STORE_Bol_L.png')\n, bbox_inches='tight')\n\ndel df\n \n # Step 6\n # Joining all insulin events\n # The time delay explored in steps 5 were not considered\nframes = [MV_bol_short, MV_bol_inter, MV_infusions, MV_bol_long]\nMV_insulin_step6 = pd.concat(frames, sort = True, verify_integrity=True\n , ignore_index=True, axis = 0)\ncols = list(MV_bol_short.columns)\nMV_insulin_step6 = MV_insulin_step6[cols]\n\ndel frames, cols\n\nstats_step6_insulin = MV_insulin_step6.describe()\npatients_step6_sub_insulin = MV_insulin_step6['SUBJECT_ID'].nunique()\npatients_step6_hadm_insulin = MV_insulin_step6['HADM_ID'].nunique()\npatients_step6_icu_insulin = MV_insulin_step6['ICUSTAY_ID'].nunique()\nvalue_step6_insulin = MV_insulin_step6['ITEMID'].value_counts()\nvalue_step6_insulin_Admin = MV_insulin_step6['InsulinAdmin'].value_counts()\n\n # Step 7\n # Joining all short acting insulin events curated\n # The time delay explored in steps 5 were not considered\nframes = [MV_bol_short, MV_infusions]\nMV_insulin_step7 = pd.concat(frames, sort = True, verify_integrity=True\n , ignore_index=True, axis = 0)\ncols = list(MV_bol_short.columns)\nMV_insulin_step7 = MV_insulin_step7[cols]\n\ndel frames, cols\n\nstats_step7_insulin = MV_insulin_step7.describe()\npatients_step7_sub_insulin = MV_insulin_step7['SUBJECT_ID'].nunique()\npatients_step7_hadm_insulin = MV_insulin_step7['HADM_ID'].nunique()\npatients_step7_icu_insulin = MV_insulin_step7['ICUSTAY_ID'].nunique()\nvalue_step7_insulin = MV_insulin_step7['ITEMID'].value_counts()\n\n # Step 8\n # Joining all short acting insulin events RAW\n # The time delay explored in steps 5 were not considered\nframes = [(insulin_mv[(insulin_mv['InsulinType']==\"Short\") & \n (insulin_mv['InsulinAdmin'].str.contains('BOLUS'))])\n, (insulin_mv[(insulin_mv['InsulinType']==\"Short\") & \n (insulin_mv['InsulinAdmin'].str.contains('INFUSION'))])]\nMV_insulin_step8 = pd.concat(frames, sort = True, verify_integrity=True\n , ignore_index=True, axis = 0)\ncols = list(MV_bol_short.columns)\nMV_insulin_step8 = MV_insulin_step8[cols]\n\ndel frames, cols\n\nstats_step8_insulin = MV_insulin_step8.describe()\npatients_step8_sub_insulin = MV_insulin_step8['SUBJECT_ID'].nunique()\npatients_step8_hadm_insulin = MV_insulin_step8['HADM_ID'].nunique()\npatients_step8_icu_insulin = MV_insulin_step8['ICUSTAY_ID'].nunique()\nvalue_step8_insulin = 
MV_insulin_step8['ITEMID'].value_counts()\n\n#MV_bol_long.SUBJECT_ID[MV_bol_long.SUBJECT_ID.isin(list(MV_bol_inter.SUBJECT_ID.unique()))].nunique()\n# patients_step3_sub_bol_L-195\n# patients_step3_sub_bol_i-195\n#%% GLUCOSE EVENTS\n # For the sensitivity analysis\n\n# Get glucose readings\nGlucose = BQU.query_Glucose()\n\n# Convert dtypes\nGlucose[[\"ICUSTAY_ID\"\n ,\"HADM_ID\"\n ,\"ITEM_GLC\"]] = Glucose[[\"ICUSTAY_ID\",\"HADM_ID\"\n ,\"ITEM_GLC\"]].apply(pd.to_numeric, errors='coerce')\n\nstats_step1_Glucose = Glucose.describe()\npatients_step1_sub_Glucose = Glucose['SUBJECT_ID'].nunique()\npatients_step1_hadm_Glucose = Glucose['HADM_ID'].nunique()\npatients_step1_icu_Glucose = Glucose['ICUSTAY_ID'].nunique()\nvalue_step1_Glucose = Glucose['ITEM_GLC'].value_counts()\n\n# Change directory\npltsave(savepath)\n\n# Save to CSV\nGlucose.to_csv(\"GlucoseReadingsRaw.csv\", index=False, encoding='utf8')\n\n# Restore directory\npltsave(workingpath)\n\n# Keep only patients with registered insulin doses\n# Get list of patients\n# Important to link on hospital admission, because the labevents table\n# doesn't have an ICU stay id.\ninsulin_mv_patients = list(MV_insulin_step6.HADM_ID.dropna().unique())\n\nGlucose = Glucose[Glucose['HADM_ID'].isin(insulin_mv_patients)]\n\nstats_step1_GlcInsulin = Glucose.describe()\npatients_step1_sub_GlcInsulin = Glucose['SUBJECT_ID'].nunique()\npatients_step1_hadm_GlcInsulin = Glucose['HADM_ID'].nunique()\npatients_step1_icu_GlcInsulin = Glucose['ICUSTAY_ID'].nunique()\nvalue_step1_GlcInsulin = Glucose['ITEM_GLC'].value_counts()\n\n# Step 2\n # Remove null values (Second verification)\n # Remove duplicated: same subject id, hadm id, and charttime\nGlucose_step2 = Glucose.dropna(subset=['glucose'])\n\nGlucose = Glucose.drop_duplicates([\"SUBJECT_ID\",\"HADM_ID\",\"CHARTTIME\"]\n, keep= 'last')\nGlucose['GLCSOURCE'] = np.nan\nGlucose_step2 = Glucose_step2.drop_duplicates([\"SUBJECT_ID\",\"HADM_ID\"\n ,\"CHARTTIME\"], keep= 'last')\n\nstats_step2_Glucose = Glucose_step2.describe()\npatients_step2_sub_Glucose = Glucose_step2['SUBJECT_ID'].nunique()\npatients_step2_hadm_Glucose = Glucose_step2['HADM_ID'].nunique()\npatients_step2_icu_Glucose = Glucose_step2['ICUSTAY_ID'].nunique()\n\n# Step 3\n # Remove values above the saturation limits of measuring techniques\n # chart.itemid IN(\n # 807, -- Fingerstick Glucose\n # 811, -- Glucose (70-105) \n # 1529, -- Glucose \n # 3745, -- BloodGlucose\n # 3744, -- Blood Glucose\n # 225664,--\tGlucose finger stick\n # 220621,--\tGlucose (serum)\n # 226537 --\tGlucose (whole blood)\n # lab.itemid IN(\n # 50931 -- GLUCOSE | CHEMISTRY | BLOOD\n # 50809 -- GLUCOSE | BLOOD GAS | BLOOD)\n\n # Initialize the FacetGrid object\nsns.set(style=\"darkgrid\", rc=None)\n\n # Plot\npal = sns.cubehelix_palette((Glucose['ITEM_GLC'].nunique()), rot=-.4, light=.7)\ng = sns.FacetGrid(Glucose[Glucose.glucose < 9000]\n , row=\"ITEM_GLC\", hue=\"ITEM_GLC\", aspect = 5, palette=pal)\ng.map(sns.stripplot, \"glucose\", clip_on=False, alpha=0.8)\ng.map(plt.axvline, x=500, ymin=0, ymax=1, clip_on=False, color = 'm')\ng.map(plt.axvline, x=1000, ymin=0, ymax=1, clip_on=False, color = 'r')\nplt.xlabel('Glucose (mg/dL)', fontsize=18)\nplt.xlim(-20,(max(Glucose.glucose[Glucose.glucose < 9000])+10))\nax = plt.gca()\nax.legend(['500 mg/dL','1000 mg/dL','data'],fontsize=16)\nax.tick_params(labelsize = 16)\nplt.savefig(os.path.join(savepath,'StripGlc_beforeclean.png')\n, bbox_inches='tight')\n
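\n# Editorial note: the 500 and 1000 mg/dL cut-offs applied below are assumed to\n# reflect the typical saturation limits of point-of-care glucometers and of\n# laboratory blood assays, respectively; readings at or above these limits are\n# treated as measurement artefacts rather than physiological values.\n\n# Fingerstick \nGlucose_step3_finger = Glucose[(Glucose['ITEM_GLC'] == 807) | \n 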
(Glucose['ITEM_GLC'] == 225664) | (Glucose['ITEM_GLC'] == 226537) | \n (Glucose['ITEM_GLC'] == 811) | (Glucose['ITEM_GLC'] == 1529) ]\nstats_step3_Glucose_finger = Glucose_step3_finger.describe()\npatients_step3_sub_Gluc_finger = Glucose_step3_finger['SUBJECT_ID'].nunique()\npatients_step3_hadm_Gluc_finger = Glucose_step3_finger['HADM_ID'].nunique()\npatients_step3_icu_Gluc_finger = Glucose_step3_finger['ICUSTAY_ID'].nunique()\nvalue_step3_Glucose_finger = Glucose_step3_finger['ITEM_GLC'].value_counts()\n\n # Remove values over 499\nGlucose_step3_finger_sat = Glucose_step3_finger[(Glucose_step3_finger['glucose'] < 500)]\nGlucose_step3_finger_sat['GLCSOURCE'] = \"FINGERSTICK\"\nstats_step3_Glucose_finger_sat = Glucose_step3_finger_sat.describe()\npatients_step3_sub_Gluc_finger_sat = Glucose_step3_finger_sat['SUBJECT_ID'].nunique()\npatients_step3_hadm_Gluc_finger_sat = Glucose_step3_finger_sat['HADM_ID'].nunique()\npatients_step3_icu_Gluc_finger_sat = Glucose_step3_finger_sat['ICUSTAY_ID'].nunique()\nvalue_step3_Glucose_finger_sat = Glucose_step3_finger_sat['ITEM_GLC'].value_counts()\n\n# Blood\nGlucose_step3_blood = Glucose[((Glucose['ITEM_GLC'] == 3745) | \n (Glucose['ITEM_GLC'] == 220621) | (Glucose['ITEM_GLC'] == 50931) | \n (Glucose['ITEM_GLC'] == 50809)\n )]\nstats_step3_Glucose_blood = Glucose_step3_blood.describe()\npatients_step3_sub_Gluc_blood = Glucose_step3_blood['SUBJECT_ID'].nunique()\npatients_step3_hadm_Gluc_blood = Glucose_step3_blood['HADM_ID'].nunique()\npatients_step3_icu_Gluc_blood = Glucose_step3_blood['ICUSTAY_ID'].nunique()\nvalue_step3_Glucose_blood = Glucose_step3_blood['ITEM_GLC'].value_counts()\n\n # Remove values over 1000\nGlucose_step3_blood_sat = Glucose_step3_blood[(Glucose_step3_blood['glucose'] < 1000)]\nGlucose_step3_blood_sat['GLCSOURCE'] = \"BLOOD\"\nstats_step3_Glucose_blood_sat = Glucose_step3_blood_sat.describe()\npatients_step3_sub_Gluc_blood_sat = Glucose_step3_blood_sat['SUBJECT_ID'].nunique()\npatients_step3_hadm_Gluc_blood_sat = Glucose_step3_blood_sat['HADM_ID'].nunique()\npatients_step3_icu_Gluc_blood_sat = Glucose_step3_blood_sat['ICUSTAY_ID'].nunique()\nvalue_step3_Glucose_blood_sat = Glucose_step3_blood_sat['ITEM_GLC'].value_counts()\n\nGlucose_step3 = pd.concat([Glucose_step3_finger_sat, Glucose_step3_blood_sat]\n, keys=['SUBJECT_ID', 'HADM_ID'], verify_integrity=True\n, ignore_index=True).copy()\n\nstats_step3_Glucose_sat = Glucose_step3.describe()\npatients_step3_sub_Gluc_sat = Glucose_step3['SUBJECT_ID'].nunique()\npatients_step3_hadm_Gluc_sat = Glucose_step3['HADM_ID'].nunique()\npatients_step3_icu_Gluc_sat = Glucose_step3['ICUSTAY_ID'].nunique()\n\npal = sns.cubehelix_palette((Glucose_step3['ITEM_GLC'].nunique())\n, rot=-.4, light=.7)\ng = sns.FacetGrid(Glucose_step3, row=\"ITEM_GLC\", hue=\"ITEM_GLC\"\n , aspect = 5, palette=pal)\ng.map(sns.stripplot, \"glucose\", clip_on=False, alpha=0.8)\ng.map(plt.axvline, x=500, ymin=0, ymax=1, clip_on=False, color = 'm')\ng.map(plt.axvline, x=1000, ymin=0, ymax=1, clip_on=False, color = 'r')\nplt.xlabel('Glucose (mg/dL)', fontsize=18)\nplt.xlim(-20,1010)\nax = plt.gca()\nax.legend(['500 mg/dL','1000 mg/dL','data'],fontsize=16)\nax.tick_params(labelsize = 16)\nplt.savefig(os.path.join(savepath,'StripGlc_afterclean.png')\n, bbox_inches='tight')\n\n\n# Step 4\n # Find: (CASE WHEN pt.charttime < pt.storetime THEN pt.charttime ELSE pt.storetime END) AS timer_\nGlucose['CHART_STORE'] = Glucose['CHARTTIME'] > Glucose['STORETIME']\nGlucose_final = Glucose_step3.copy()\nGlucose_final['CHART_STORE'] = 
Glucose_final['CHARTTIME'] > Glucose_final['STORETIME']\n\n# Replace\nGlucose_final.loc[(Glucose_final['CHART_STORE'] == True),'CHARTTIME'] = Glucose_final['STORETIME']\n\nstep4_switch_cases = Glucose[(Glucose.CHART_STORE == True)].copy()\nstep4_switch_cases['TIMEDELTA'] = abs(step4_switch_cases.CHARTTIME \n - step4_switch_cases.STORETIME).dt.seconds\n\nstats_step4_Glucose = step4_switch_cases.describe()\npatients_step4_sub_Glucose = step4_switch_cases['SUBJECT_ID'].nunique()\npatients_step4_hadm_Glucose = step4_switch_cases['HADM_ID'].nunique()\npatients_step4_icu_Glucose = step4_switch_cases['ICUSTAY_ID'].nunique()\nvalue_step4_Glucose = step4_switch_cases['ITEM_GLC'].value_counts()\n\n\n # Scatterplottimedelta\nplt.figure(figsize=(12,10))\ndf = step4_switch_cases.loc[step4_switch_cases['CHART_STORE']==True]\ndf['TIMEDELTA']=df['TIMEDELTA']/60\nsns.scatterplot(x=\"TIMEDELTA\",y=\"glucose\",data=df,alpha=0.3)\nplt.title('Time difference (CHARTTIME - STORETIME) n='+str(df.shape[0])\n ,fontsize=18)\nplt.vlines(df['TIMEDELTA'].quantile(0.50), 0, max(df.glucose)\n , color = 'red', linestyles='--',label='Median (25 min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.95), 0, max(df.glucose)\n , color = '#34A853', linestyles='--',label='$P$ = 95$^{TH}$ (32 min)')\nplt.vlines(df['TIMEDELTA'].quantile(0.99), 0, max(df.glucose)\n , color = '#9CBF38', linestyles='--',label='$P$ = 99$^{TH}$ (54 min)')\nplt.vlines(90, 0, max(df.glucose), color = '#E7BE2D', linestyles='dotted'\n ,label='90 min')\nplt.xlabel('$\\Delta$ time (min)', fontsize=18)\nplt.ylabel('Glucose (mg/dL)', fontsize=18)\nplt.ylim(0,(max(df.glucose)))\nplt.xlim(0,(max(df.TIMEDELTA)+100))\nax = plt.gca()\nax.tick_params(axis = 'both', which = 'major', labelsize = 18)\nax.legend(fontsize=18) \nplt.savefig(os.path.join(savepath,'ScatCHART-STORE.png'), bbox_inches='tight')\n\n # Stripplots for the items id in this list\npal = sns.cubehelix_palette((df['ITEM_GLC'].nunique()), rot=-.5, light=.7)\ng = sns.FacetGrid(df[df.glucose < 9000]\n , row=\"ITEM_GLC\", hue=\"ITEM_GLC\", aspect = 5, palette=pal)\ng.map(sns.stripplot, \"glucose\", clip_on=False, alpha=0.8)\nplt.xlabel('Glucose (mg/dL)', fontsize=18)\nplt.xlim(-20,(max(df.glucose[df.glucose < 9000])+10))\nax = plt.gca()\nax.tick_params(labelsize = 16)\nplt.savefig(os.path.join(savepath,'StripCHART-STORE_Glc.png')\n, bbox_inches='tight')\n\ndel df,pal\n\n# %% Create columns for merging\n\n# Glucose readings (14 columns)\nGlucose_final = Glucose_final[['SUBJECT_ID','HADM_ID','ICUSTAY_ID','CHARTTIME'\n ,'glucose','GLCSOURCE','CHART_STORE']]\nGlucose_final['STARTTIME'] = pd.NaT\nGlucose_final['ENDTIME'] = pd.NaT\nGlucose_final['INPUT'] = np.nan\nGlucose_final['INPUT_HRS'] = np.nan\nGlucose_final['INSULINTYPE'] = np.nan\nGlucose_final['EVENT'] = np.nan\nGlucose_final['INFXSTOP'] = np.nan\n\nGlucose_final.columns = ['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'GLCTIMER'\n , 'GLC', 'GLCSOURCE','CHART_STORE', 'STARTTIME'\n , 'ENDTIME', 'INPUT', 'INPUT_HRS', 'INSULINTYPE'\n , 'EVENT', 'INFXSTOP']\n\n# Insulin events\nMV_insulin = MV_insulin_step6[['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID'\n , 'STARTTIME', 'ENDTIME', 'AMOUNT', 'RATE'\n , 'InsulinType', 'InsulinAdmin', 'INFXSTOP' \n , 'CHART_STORE']]\nMV_insulin['GLCTIMER'] = pd.NaT\nMV_insulin['GLCSOURCE'] = np.nan\nMV_insulin['GLC'] = np.nan\n\nMV_insulin.columns = ['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'STARTTIME'\n , 'ENDTIME', 'INPUT', 'INPUT_HRS', 'INSULINTYPE' \n , 'EVENT', 'INFXSTOP', 'CHART_STORE', 'GLCTIMER'\n , 'GLC','GLCSOURCE']\n\n\ninsulin_raw = 
pd.read_csv(\"InsulinInputsRAW.csv\", encoding='utf8')\n\ninsulin_raw[[\"STARTTIME\",\"ENDTIME\",\"STORETIME\"]] = insulin_raw[\n [\"STARTTIME\",\"ENDTIME\",\"STORETIME\"]].apply(pd.to_datetime\n , format='%Y%m%d %H:%M:%S')\n\ninsulin_raw['GLCTIMER'] = pd.NaT\ninsulin_raw['GLCSOURCE'] = np.nan\ninsulin_raw['GLC'] = np.nan\ninsulin_raw['CHART_STORE'] = np.nan\n\ninsulin_raw = insulin_raw[['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID'\n ,'STARTTIME','ENDTIME','AMOUNT','RATE','InsulinType','InsulinAdmin'\n ,'INFXSTOP','CHART_STORE','GLCTIMER','GLC','GLCSOURCE']]\n\ninsulin_raw.columns = ['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'STARTTIME'\n , 'ENDTIME', 'INPUT', 'INPUT_HRS', 'INSULINTYPE'\n , 'EVENT', 'INFXSTOP', 'CHART_STORE'\n , 'GLCTIMER', 'GLC', 'GLCSOURCE']\n\n# Change directory\npltsave(savepath)\n\n# Save to CSV\nGlucose_final.to_csv(\"GlucoseReadingsCurated.csv\", index=False, encoding='utf8')\nMV_insulin.to_csv(\"InsulinInputsCurated.csv\", index=False, encoding='utf8')\n\n# Restore directory\npltsave(workingpath)\n\n# %% Create datasets BOTH CURATED\n\n# Concatenate Glucose and insulin values\n\nGlc_Ins = pd.concat([MV_insulin, Glucose_final], sort = True\n , ignore_index = True)\n\n# Reorder columns\nGlc_Ins = Glc_Ins[['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'STARTTIME'\n , 'ENDTIME', 'INPUT', 'INPUT_HRS', 'INSULINTYPE' \n , 'EVENT', 'INFXSTOP', 'GLCTIMER', 'GLC', 'GLCSOURCE'\n , 'CHART_STORE']].sort_values(\n by=['SUBJECT_ID', 'HADM_ID', 'GLCTIMER', 'STARTTIME'])\n\nGlc_Ins['TIMER'] = pd.NaT\n\n# Reset index\nGlc_Ins = Glc_Ins.reset_index()\n\n# Reorder columns\nGlc_Ins = Glc_Ins[['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'TIMER' ,'STARTTIME'\n , 'GLCTIMER', 'ENDTIME', 'INPUT', 'INPUT_HRS'\n , 'INSULINTYPE', 'EVENT', 'GLC', 'GLCSOURCE'\n , 'CHART_STORE', 'INFXSTOP']].sort_values(\n by=['SUBJECT_ID', 'HADM_ID', 'GLCTIMER', 'STARTTIME'])\n\n# Fill TIMER: use GLCTIMER when present, otherwise STARTTIME\nGlc_Ins['TIMER'] = Glc_Ins['GLCTIMER']\nGlc_Ins['TIMER'].loc[\n (Glc_Ins['TIMER'].isnull())] = Glc_Ins.loc[\n (Glc_Ins['TIMER'].isnull()),'STARTTIME']\nGlc_Ins = Glc_Ins.sort_values(by = ['SUBJECT_ID', 'HADM_ID', 'TIMER'])\n\n# Change directory\npltsave(savepath)\n\n# Save to CSV\nGlc_Ins.to_csv(\"GlucoseInsulinUNIONALL.csv\", index=False, encoding='utf8')\n\n# Restore directory\npltsave(workingpath)\n\n#%% Create datasets\n\n# Concatenate Glucose CURATED and insulin RAW\n\nGlc_cur0ins_raw = pd.concat([insulin_raw, Glucose_final], sort = True\n , ignore_index = True)\n\n# Reorder columns\nGlc_cur0ins_raw = Glc_cur0ins_raw[['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'STARTTIME'\n , 'ENDTIME', 'INPUT', 'INPUT_HRS', 'INSULINTYPE' \n , 'EVENT', 'INFXSTOP', 'GLCTIMER', 'GLC', 'GLCSOURCE'\n , 'CHART_STORE']].sort_values(\n by=['SUBJECT_ID', 'HADM_ID', 'GLCTIMER', 'STARTTIME'])\n\nGlc_cur0ins_raw['TIMER'] = pd.NaT\n\n# Reset index\nGlc_cur0ins_raw = Glc_cur0ins_raw.reset_index()\n\n# Reorder columns\nGlc_cur0ins_raw = Glc_cur0ins_raw[['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'TIMER' ,'STARTTIME'\n , 'GLCTIMER', 'ENDTIME', 'INPUT', 'INPUT_HRS'\n , 'INSULINTYPE', 'EVENT', 'GLC', 'GLCSOURCE'\n , 'CHART_STORE', 'INFXSTOP']].sort_values(\n by=['SUBJECT_ID', 'HADM_ID', 'GLCTIMER', 'STARTTIME'])\n\n# Fill TIMER: use GLCTIMER when present, otherwise STARTTIME\nGlc_cur0ins_raw['TIMER'] = Glc_cur0ins_raw['GLCTIMER']\nGlc_cur0ins_raw['TIMER'].loc[\n (Glc_cur0ins_raw['TIMER'].isnull())] = Glc_cur0ins_raw.loc[\n (Glc_cur0ins_raw['TIMER'].isnull()),'STARTTIME']\n\nGlc_cur0ins_raw = Glc_cur0ins_raw.sort_values(by = ['SUBJECT_ID'\n , 'HADM_ID', 'TIMER'])\n
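\n# Editorial note: the TIMER fill above coalesces GLCTIMER with STARTTIME; an\n# equivalent single-statement, warning-free form would be:\n# Glc_cur0ins_raw['TIMER'] = Glc_cur0ins_raw['GLCTIMER'].fillna(\n#     Glc_cur0ins_raw['STARTTIME'])\n\n# Change directory\npltsave(savepath)\n\n# Save to 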
CSV\nGlc_cur0ins_raw.to_csv(\"Glc_cur0ins_raw.csv\", index=False, encoding='utf8')\n\n# Restore directory\npltsave(workingpath)\n\n#%% Admissions\n# Extraction of the ICU stay codes associated with URGENT and EMERGENCY admissions\n# (admission type column)\n\n# Change directory\npltsave(savepath)\n\nadmissions = BQU.sql_query_Admissions()\n\n# Restore directory\npltsave(workingpath)\n\n# Convert dtypes\nadmissions[\"LOS_ICU_hr\"] = admissions[\"LOS_ICU_hr\"].apply(pd.to_numeric\n , errors='coerce')\n\n# Change directory\npltsave(savepath)\n\n# Save to CSV\nadmissions.to_csv(\"AdmissionsICU.csv\", index=False, encoding='utf8')\n\n# Restore directory\npltsave(workingpath)\n\nICUinputs = admissions[[\"SUBJECT_ID\",\"HADM_ID\",\"ICUSTAY_ID\",\"ICU_ADMISSIONTIME\"\n ,\"ICU_DISCHARGETIME\",\"LOS_ICU_hr\",\"first_ICU_stay\"]].merge(\n Glc_Ins[['SUBJECT_ID','HADM_ID','TIMER','STARTTIME'\n ,'GLCTIMER','ENDTIME','INPUT','INPUT_HRS'\n ,'INSULINTYPE','EVENT','GLC','GLCSOURCE'\n ,'CHART_STORE','INFXSTOP']], how='inner'\n , on=[\"SUBJECT_ID\",\"HADM_ID\"]).sort_values(\n by=['SUBJECT_ID', 'HADM_ID', 'TIMER'])\n\n#drop measurements that occur before or after stay\nICUinputs = ICUinputs[((ICUinputs[\"GLCTIMER\"] > ICUinputs[\"ICU_ADMISSIONTIME\"]) &\n (ICUinputs[\"GLCTIMER\"] < ICUinputs[\"ICU_DISCHARGETIME\"])) | \n ((ICUinputs[\"STARTTIME\"] > ICUinputs[\"ICU_ADMISSIONTIME\"]) & \n (ICUinputs[\"ENDTIME\"] < ICUinputs[\"ICU_DISCHARGETIME\"]))]\n \n# Glucose CURATED and insulin RAW\nGlc_cur0ins_raw = admissions[[\"SUBJECT_ID\",\"HADM_ID\",\"ICUSTAY_ID\",\"ICU_ADMISSIONTIME\"\n ,\"ICU_DISCHARGETIME\",\"LOS_ICU_hr\",\"first_ICU_stay\"]].merge(\n Glc_cur0ins_raw[['SUBJECT_ID','HADM_ID','TIMER','STARTTIME'\n ,'GLCTIMER','ENDTIME','INPUT','INPUT_HRS'\n ,'INSULINTYPE','EVENT','GLC','GLCSOURCE'\n ,'CHART_STORE','INFXSTOP']], how='inner'\n , on=[\"SUBJECT_ID\",\"HADM_ID\"]).sort_values(\n by=['SUBJECT_ID', 'HADM_ID', 'TIMER'])\n\n# %% Analyze alignment\n\n# Glucose non adjusted and insulin non-adjusted\n# 90 min before an insulin dose\n\n# Glucose adjusted\n\n# Change directory\npltsave(savepath)\n\nICUinputs_adjusted = BQU.query_alignment_adjusted()\n\n# Convert dtypes\nICUinputs_adjusted[[\"Repeated\",\"INFXSTOP\",\"RULE\"]] = ICUinputs_adjusted[\n [\"Repeated\",\"INFXSTOP\",\"RULE\"]].apply(pd.to_numeric, errors='coerce')\n\n\n# Save to CSV\nICUinputs_adjusted.to_csv(\"ICUinputs_adjusted.csv\", index=False, encoding='utf8')\n\n# Restore directory\npltsave(workingpath)\n\n# Get statistics\nICUinputs_adjusted = ICUinputs_adjusted[ICUinputs_adjusted['Repeated']!=1]\nstats_ICUinputs_adjusted = ICUinputs_adjusted.describe()\npatients_ICUinputs_adjusted = ICUinputs_adjusted['SUBJECT_ID'].nunique()\n\n# Filtering for only short insulin boluses and all sources of glucose\nshort_BOL_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Short\") & \n (ICUinputs_adjusted['EVENT'].str.contains('BOLUS'))]\n\nstats_short_BOL_adjusted = short_BOL_adjusted.describe()\npatients_short_BOL_adjusted = short_BOL_adjusted['SUBJECT_ID'].nunique()\n\n# Heatmap\nshort_BOL_heat = short_BOL_adjusted.dropna(subset=['GLC_AL']).copy()\nshort_BOL_heat['A'] = ((short_BOL_heat['GLCTIMER_AL'] - \n short_BOL_heat['STARTTIME'])/pd.Timedelta('1 minute'))*60\nshort_BOL_heat=short_BOL_heat.set_index('A')\n\n#Define the cell size on the heat map\nglc_base=25\nins_base=2\n\n#Define heatmap limits\nxlow=0\nxhigh=P99_bol_s\nylow=90\nyhigh=450\nxhigh-=ins_base\n\n#create categories for constructing the 
heatmap\nshort_BOL_heat['glc_cat']=(short_BOL_heat['GLC_AL'].apply(\n lambda x: myround(x, glc_base))/glc_base)\nshort_BOL_heat['ins_cat']=(short_BOL_heat['INPUT'].apply(\n lambda x: myround(x, ins_base))/ins_base)\n\n#create dataframe for the heatmap using pivot_table\nheat_df=pd.pivot_table(short_BOL_heat, values='ICUSTAY_ID', index=['glc_cat']\n, columns=['ins_cat'], aggfunc='count')\n#trim the heatmap dataframe based on the limits specified\nheat_df=heat_df.loc[ylow/glc_base:yhigh/glc_base:,xlow/ins_base:xhigh/ins_base:]\n\n#create labels for the x and y ticks\nheat_xtick=np.arange(xlow, xhigh+ins_base*2, ins_base)\nheat_ytick=np.arange(ylow, yhigh+glc_base*1, glc_base)\n\n#plot heatmap\nsns.set(style=\"ticks\", font_scale=1.2)\nfig, ax = plt.subplots(1, 1, figsize = (12, 12))\nax=sns.heatmap(heat_df, robust=True, annot=True, cmap=\"BuPu\", fmt=\"2.0f\"\n , xticklabels=heat_xtick, yticklabels=heat_ytick\n , norm=colors.PowerNorm(gamma=1./2.))\n\n#titles\nplt.title(\"Glucose readings prior to short-acting insulin bolus\", fontsize=25)\nplt.ylabel(\"Blood glucose (mg/dL)\", fontsize=20)\nplt.xlabel(\"Insulin dose (U)\", fontsize=20)\n\n#invert axis and offset labels\nax.invert_yaxis()\nax.set_yticks(np.arange(0, ((yhigh-ylow)/glc_base)+1))\nax.set_xticks(np.arange(0, ((xhigh-xlow)/ins_base)+2))\nplt.savefig(os.path.join(savepath,'ShortBol_ADJ_heatmap.png'))\n\n\n# Filtering for only short insulin infusions and all sources of glucose\nshort_INF_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Short\") & \n (ICUinputs_adjusted['EVENT'].str.contains('INFUSION'))]\n\nstats_short_INF_adjusted = short_INF_adjusted.describe()\npatients_short_INF_adjusted = short_INF_adjusted['SUBJECT_ID'].nunique()\n\n# plots\nshort_INF_adjusted['GLCcat'] = pd.cut(short_INF_adjusted['GLC_AL']\n, bins=[0,100,150,200,250,300,350,400,1000]\n, include_lowest=True, right=False) #8 categories\n\nrangesplot2 = ['glc <100','100≤ glc <150', '150≤ glc <200'\n , '200≤ glc <250', '250≤ glc <300', '300≤ glc <350',\n '350≤ glc <400', '400≤ glc']\n\n\nplt.figure(figsize=(12,10))\nsns.boxplot(x=\"GLCcat\", y=\"INPUT_HRS\", data=short_INF_adjusted, meanline=True\n , showmeans=True, palette=\"BuPu\"\n , meanprops={'linestyle': '--', 'linewidth': 2.5\n , 'color': 'white'} )\nplt.xticks(plt.xticks()[0], rangesplot2,rotation=45)\nplt.title('Glucose readings before any insulin infusion (short-acting)',fontsize=18)\nplt.ylabel('Insulin (U/hr)', fontsize=18)\nplt.xlabel('Glucose (mg/dL)', fontsize=18)\nax = plt.gca()\nax.tick_params(axis = 'both', which = 'major', labelsize = 18)\nplt.tight_layout()\nplt.savefig(os.path.join(savepath,'Boxplotshort_INF_adjusted.png'))\n\ndel rangesplot2\n\n# Filtering for only intermediate insulin boluses and all sources of glucose\ninterm_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Intermediate\") & \n (ICUinputs_adjusted['EVENT'].str.contains('BOLUS'))]\n\nstats_interm_adjusted = interm_adjusted.describe()\npatients_interm_adjusted = interm_adjusted['SUBJECT_ID'].nunique()\n\n# Filtering for only long insulin boluses and all sources of glucose\nlong_adjusted = ICUinputs_adjusted[\n (ICUinputs_adjusted['INSULINTYPE']==\"Long\") & \n (ICUinputs_adjusted['EVENT'].str.contains('BOLUS'))]\n\nstats_long_adjusted = long_adjusted.describe()\npatients_long_adjusted = long_adjusted['SUBJECT_ID'].nunique()\n
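\n# Editorial note: 'myround' is defined earlier in this script and is assumed\n# to round a value to the nearest multiple of 'base', which is what the\n# heatmap binning above relies on. A minimal sketch of that behaviour:\n#\n# def myround(x, base=5):\n#     return base * round(float(x) / base)\n\n#%% Sankey\n\n# Glucose control over 7 days\n\n# Get the timedelta between admission time and TIMER\nICUinputs_adjusted['TIMERm'] = 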
(ICUinputs_adjusted['TIMER']-\n ICUinputs_adjusted['ICU_ADMISSIONTIME'])/pd.Timedelta('1 minute')\n\nICUinputs_adjusted['TIMERs'] = pd.to_timedelta(ICUinputs_adjusted['TIMERm']*60\n , unit='s')\n\n# Dictionary for storage\nglc_ts_d={}\n\n# Create daily median GLC time series for each stay\nfor icu_stay in ICUinputs_adjusted.ICUSTAY_ID.unique():\n    # get the time series\n    ts=ICUinputs_adjusted.loc[ICUinputs_adjusted[\"ICUSTAY_ID\"]==icu_stay,['TIMERs','GLC','GLC_AL']]\n    # Fill GLC with the aligned reading where the raw one is missing\n    ts['GLC'].loc[(ts['GLC'].isnull())] = ts.loc[(ts['GLC'].isnull()),'GLC_AL']\n    # Remove last column\n    ts = ts[['TIMERs','GLC']]\n    # Drop NaN\n    ts=ts.dropna(subset=[\"GLC\"])\n    # Set index timestamp\n    ts = ts.set_index(['TIMERs'])\n    # Add a NaN entry to anchor the time stamp to zero\n    tmp = pd.DataFrame([np.nan],index=[ts.index[0]-ts.index[0]],columns=['GLC'])\n    ts = ts.append(tmp)\n    ts = ts.sort_index()\n    # Resample to daily\n    ts=ts.resample(\"D\").median()\n    # Convert dataframe into dictionary\n    ts=ts.T.to_dict('list')\n    # save in dictionary\n    glc_ts_d[icu_stay]=ts\n\ndaily_glucose_df=pd.DataFrame.from_dict(glc_ts_d, orient=\"index\", dtype = float)\n\n# Save column names\nlist_columns = list(daily_glucose_df.columns)\n\ndaily_glucose_df = pd.concat((daily_glucose_df[col].apply(pd.Series) for col in daily_glucose_df), axis=1)\ndaily_glucose_df.columns = list_columns\n\ndel ts, tmp, list_columns\n\n# Select only the first week of results\ndaily_gs=daily_glucose_df.iloc[:,0:7].dropna().copy()\n\n# Define states of hyper- or hypoglycemia\ndaily_gs[(daily_gs < 80)]=1\ndaily_gs[(daily_gs <= 180) & (daily_gs >= 80)]=2\ndaily_gs[(daily_gs > 180)]=3\n# Convert into integers\ndaily_gs=daily_gs.astype(int)\n# Create column headers [1 to 7]\ndaily_gs.columns=np.arange(1,8)\n\n# Find daily transition probabilities\ntransitions_d={}\n\n# Set up transition matrix for each day\nfor day in np.arange(1,7):\n    # Creates a list (vector) of 3 elements each one, 3 times\n    transitions_d[\"Day\"+str(day)]=[[0]*3 for _ in range (3)]\n\n# Count the number of transitions for each day\nfor day in np.arange(1,7):\n    for (i,j) in zip(daily_gs[day].values, daily_gs[day+1].values):\n        # i is the list's row number\n        # j is the list's column number\n        transitions_d[\"Day\"+str(day)][i-1][j-1]+=1\n\n# Normalise the counts into probabilities. Built as new lists on purpose:\n# a plain .copy() of the dict would be shallow, so normalising in place\n# would also overwrite the raw counts in transitions_d, which are still\n# needed for value_list below.\ntransitions_d_prob={}\nfor day in np.arange(1,7):\n    transitions_d_prob[\"Day\"+str(day)]=[]\n    for row in transitions_d[\"Day\"+str(day)]:\n        s = sum(row)\n        transitions_d_prob[\"Day\"+str(day)].append(\n            [f/s for f in row] if s > 0 else list(row))\n\nsource_list=[]\nfor i in np.arange(1,19):\n    for n in np.arange(1,4):\n        source_list.append(i)\n\ntarget_list=[]\nfor i in np.arange(3,19,3):\n    temp=[i+1,i+2,i+3]*3\n    for item in temp:\n        target_list.append(item)\n\nvalue_list=[]\nfor entry in zip(source_list, target_list):\n    day=int(1+(entry[0]-1)/3)\n    row=((entry[0]-1)%3)\n    col=(entry[1]-day*3-1)\n    value_list.append(transitions_d[\"Day\"+str(day)][row][col])\n\n# Create dataframe for sankey\nsankey_d=pd.DataFrame({\"Source\":source_list, \"Target\": target_list, \"Value\":value_list})\nsankey_d[\"Colour\"]=\"rgba(144,196,151,0.8)\"\nsankey_d.loc[sankey_d[\"Target\"]%3==2,\"Colour\"]=\"rgba(252,42,58,0.8)\"\nsankey_d.loc[sankey_d[\"Target\"]%3==1,\"Colour\"]=\"rgba(237,118,59,0.8)\"\nsankey_d[\"Label\"]=\"\"\nsankey_d.at[3, \"Label\"] = \"Hyperglycaemic\"\nsankey_d.at[1, \"Label\"] = \"Hypoglycaemic\"\n\ni=1\nfor row in np.arange(0, sankey_d.shape[0]):\n    if sankey_d.loc[row].loc[\"Target\"]%3 ==0:\n        sankey_d.at[row, \"Label\"] = \"Day\" + str(i)\n        i+=1\n\n
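# Editorial note on the numbering above: nodes 1..21 stand for the three\n# glycaemic states (hypo, in range, hyper) on each of the 7 days; every node\n# of days 1-6 (numbers 1..18) appears three times in source_list, once per\n# possible next-day state, and value_list carries the matching transition\n# count from transitions_d.\n\nsankey_d[\"Link 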
Colour\"]=\"rgba(32,32,32,0.2)\"\n\ndata_trace=dict(\n type='sankey',\n domain= dict(\n x= [0,1],\n y= [0,1]\n ),\n orientation = \"h\",\n valueformat=\".0f\",\n #opacity=0.5,\n node = dict(\n pad = 15,\n thickness = 30,\n line = dict(\n color = \"black\",\n width= 0.5\n ),\n label = sankey_d['Label'].dropna(axis=0, how=\"any\"),\n color = sankey_d[\"Colour\"]\n ),\n link = dict(\n source=sankey_d['Source'].dropna(axis=0, how=\"any\"),\n target=sankey_d['Target'].dropna(axis=0, how=\"any\"),\n value=sankey_d['Value'].dropna(axis=0, how=\"any\"),\n color=sankey_d['Link Colour'].dropna(axis=0, how=\"any\")\n \n )\n)\n\n\nlayout = dict(\n title = \"Daily median glucose in the ICU\",\n height = 700,\n width = 1000,\n titlefont = dict(\n size = 20\n ), \n font = dict(\n size = 12\n ), \n)\n\n#py.init_notebook_mode(connected=True)\n\nfig = dict(data=[data_trace], layout=layout)\nfig = go.Figure(fig)\nplot(fig)\n#py.iplot(fig, validate=False, filename='Test')\n\n#%% SAVE WORKSPACE\n\ndata4save = [stats_step1_Glucose, stats_step2_Glucose\n , stats_step3_Glucose_blood, stats_step3_Glucose_blood_sat\n , stats_step3_Glucose_finger, stats_step3_Glucose_finger_sat\n , stats_step3_Glucose_sat, stats_step4_Glucose\n , insulin_mv, MV_insulin_step6, Glc_cur0ins_raw, ICUinputs\n , admissions, ICUinputs_adjusted]\n\n# Change directory\npltsave(savepath)\n \nwith open('SensitivityAnalysis.pickle', 'wb') as f:\n pickle.dump(data4save, f)\n \nwith open('InsulinGlucoseDexVars.pickle', 'rb') as f:\n SavedVars = pickle.load(f)\n\nwith open('Vars2share.pickle', 'rb') as f:\n SentVars = pickle.load(f)\n", "sub_path": "SensitivityAnalysis.py", "file_name": "SensitivityAnalysis.py", "file_ext": "py", "file_size_in_byte": 45709, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.abspath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 49, "usage_type": "call"}, {"api_name": "BQueries.query_Insulin", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 77, "usage_type": "attribute"}, {"api_name": "seaborn.jointplot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, 
{"api_name": "seaborn.scatterplot", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", 
"line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 314, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 314, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path", "line_number": 321, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 363, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 366, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 367, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 367, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 378, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 378, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 379, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 380, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 381, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 381, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 382, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 385, "usage_type": "call"}, {"api_name": "os.path", "line_number": 385, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 394, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 412, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 432, "usage_type": "call"}, {"api_name": "BQueries.query_Glucose", "line_number": 452, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 458, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 496, "usage_type": "attribute"}, {"api_name": "seaborn.set", "line_number": 521, "usage_type": "call"}, {"api_name": "seaborn.cubehelix_palette", "line_number": 524, "usage_type": "call"}, {"api_name": "seaborn.FacetGrid", "line_number": 525, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 527, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 528, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 528, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 529, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 529, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 530, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 530, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 531, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 531, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 535, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 535, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 535, "usage_type": "call"}, {"api_name": "os.path", "line_number": 535, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 577, "usage_type": "call"}, {"api_name": "seaborn.cubehelix_palette", "line_number": 586, "usage_type": "call"}, {"api_name": "seaborn.FacetGrid", "line_number": 588, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 590, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 591, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 591, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 592, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 592, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 593, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 593, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 594, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 594, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.gca", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 595, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 598, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 598, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 598, "usage_type": "call"}, {"api_name": "os.path", "line_number": 598, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 623, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 623, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 626, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 627, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 627, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 629, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 631, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 631, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 633, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 633, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 635, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 635, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 637, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 637, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 638, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 638, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 639, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 639, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 640, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 640, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 641, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 641, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 644, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 644, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 644, "usage_type": "call"}, {"api_name": "os.path", "line_number": 644, "usage_type": "attribute"}, {"api_name": "seaborn.cubehelix_palette", "line_number": 647, "usage_type": "call"}, {"api_name": "seaborn.FacetGrid", "line_number": 648, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 650, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 651, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 651, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 652, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 653, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 653, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 655, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 655, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 655, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 655, "usage_type": "attribute"}, {"api_name": "pandas.NaT", "line_number": 665, "usage_type": "attribute"}, {"api_name": "pandas.NaT", "line_number": 666, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 667, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 668, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 669, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 670, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 671, "usage_type": "attribute"}, {"api_name": "pandas.NaT", "line_number": 683, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 684, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 685, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 693, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 696, "usage_type": "attribute"}, {"api_name": "pandas.NaT", "line_number": 699, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 700, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 701, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 702, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 727, "usage_type": "call"}, {"api_name": "pandas.NaT", "line_number": 737, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 769, "usage_type": "call"}, {"api_name": "pandas.NaT", "line_number": 779, "usage_type": "attribute"}, {"api_name": "BQueries.sql_query_Admissions", "line_number": 816, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 822, "usage_type": "attribute"}, {"api_name": "BQueries.query_alignment_adjusted", "line_number": 869, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 873, "usage_type": "attribute"}, {"api_name": "pandas.Timedelta", "line_number": 898, "usage_type": "call"}, {"api_name": "pandas.pivot_table", "line_number": 919, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 925, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 926, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 929, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 930, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 930, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 931, "usage_type": "call"}, {"api_name": "matplotlib.colors.PowerNorm", "line_number": 933, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 933, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 936, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 936, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 937, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 937, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 938, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 938, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 942, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 943, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 944, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 944, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 944, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 944, "usage_type": "attribute"}, {"api_name": "pandas.cut", "line_number": 956, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 965, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 965, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 966, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 970, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 970, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 971, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 971, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 972, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 972, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 973, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 973, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 974, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 974, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 976, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 976, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 977, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 977, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 977, "usage_type": "call"}, {"api_name": "os.path", "line_number": 977, "usage_type": "attribute"}, {"api_name": "pandas.Timedelta", "line_number": 1003, "usage_type": "call"}, {"api_name": "pandas.to_timedelta", "line_number": 1005, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 1024, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 1034, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1034, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 1039, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1039, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 1054, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1060, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1065, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1073, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1080, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1081, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1085, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1098, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1107, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 1159, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 1159, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 1160, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 1176, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 1179, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 1182, "usage_type": "call"}]} +{"seq_id": "510373128", "text": "import matplotlib\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndelta = 
0.025\r\nx = np.arange(-3.0, 3.0, delta)\r\ny = np.arange(-1.0, 1.0, delta)\r\nX,Y = np.meshgrid(x, y)\r\nZ=X+Y\r\n# Z is a simple tilted plane (X + Y), so the contour lines are straight\r\n\r\n\r\n\r\n\r\n# You can set negative contours to be solid instead of dashed:\r\nmatplotlib.rcParams['contour.negative_linestyle'] = 'solid'\r\nplt.figure()\r\nCS = plt.contour(X, Y, Z, 6,colors='k', )\r\nplt.clabel(CS, fontsize=9, inline=1)\r\nplt.title('Single color - negative contours solid')\r\nplt.show()", "sub_path": "hw1/hw1_all/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 507, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.arange", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "319738691", "text": "#!/usr/bin/env python3\n# (c) 2017 Landon A Marchant\n\"\"\" Imports CSV of states and their Google Trend search index regarding the Trump transition.\n\"\"\"\n\n\nimport csv\nimport matplotlib.pyplot as plt\n\nGOOGLE_TREND = []\n\nwith open('regional_search_interest_trump_last_week.csv', 'r') as f:\n    CSVF = csv.reader(f)\n    for row in CSVF:\n        # Each row is assumed to look like '<State> - <search index>'; rows\n        # that do not parse that way (e.g. a header) are skipped. The original\n        # code also did int(state_name), which cannot work for non-numeric\n        # state names and raised ValueError, so only the search index is kept.\n        try:\n            state_name, ranking_number = row[0].split('-')\n            GOOGLE_TREND.append(int(ranking_number))\n        except ValueError:\n            continue\n\nRANKING = len(GOOGLE_TREND)\n\nplt.title('30 Day Google Trend Ranking by State: Trump Transition')\nplt.xlabel('State (CSV row order)')\nplt.ylabel('Search Ranking Out of 100')\nplt.plot(range(1, RANKING+1), GOOGLE_TREND, 'r-') # plot in red using lines; GOOGLE_TREND is y\nplt.axis([1, RANKING, 0, 110]) # range of data I want to see. 
1,n = x range, 0,50 = y\nplt.savefig('trumptrend.pdf')\n", "sub_path": "project_04/TrumpTrends.py", "file_name": "TrumpTrends.py", "file_ext": "py", "file_size_in_byte": 903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "csv.reader", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "247068823", "text": "# author = uakf.b\n# version = 1.0\n# name = gamequeue\n# fullname = plugins/pychop/gamequeue\n# description = If you have multiple GHost instances, this is great. It allows for a hosting system where users host games through the channel bot, and then channel bot forwards commands to host bots.\n\n### modify settings below\n\n# dictionary mapping from botid to (username, trigger)\ngqBots = {}\ngqBots[1] = ('yourfirstbot', '!',)\ngqBots[2] = ('yoursecondbot', '-',)\n\n# access needed to create games\ngqAccess = 0\n\n# whether or not to check gamelist patch (if false, !priv will be disabled)\ngqGamelist = True\n\n# path to maps\ngqMapPath = \"/home/ghost/maps\"\n\n# path to map configuration files\ngqCfgPath = \"/home/ghost/mapcfgs\"\n\n### end settings\n\n# plugin db instance if gqGamelist is enabled\npdb = 0\n\n# last time that we tried to host a game\nlastTime = 0\n\n# list of maps\nmapList = []\n\n# list of map configs\ncfgList = []\n\n# dictionary from username to tuple (\"map\" or \"cfg\", loaded map or cfg name)\nuserMaps = {}\n\n# bnet to use\ngqBnet = 0\n\n# the last time that each bot was used\ngqBotTime = {}\n\nfrom collections import deque\n\n# queue containing tuples (username, command, maptype (load or map), mapname, gamename)\nhostQueue = deque()\n\nimport host\nimport MySQLdb\nimport random\nimport os\nimport time\nfrom plugindb import PluginDB\n\ndef init():\n\tglobal pdb\n\thost.registerHandler('ProcessCommand', onCommand)\n\thost.registerHandler('Update', onUpdate)\n\n\tif gqGamelist:\n\t\tpdb = PluginDB()\n\t\tpdb.dbconnect()\n\t\n\trefreshMaps()\n\ndef deinit():\n\thost.unregisterHandler('ProcessCommand', onCommand)\n\thost.unregisterHandler('Update', onUpdate)\n\ndef refreshMaps():\n\tglobal mapList, cfgList\n\t\n\tprint(\"[GAMEQUEUE] Refreshing internal map list...\")\n\tmapList = os.listdir(gqMapPath)\n\tcfgList = os.listdir(gqCfgPath)\n\ndef onUpdate(chop):\n\tglobal lastTime, gqBotTime\n\n\tif gettime() - lastTime > 3000 and hostQueue and gqBnet != 0:\n\t\tlastTime = gettime()\n\n\t\t# first thing is to check if we have a bot available\n\t\t# find all bots in channel\n\t\tpotentialBots = dict(gqBots) # create a copy so we don't modify 
original\n\t\tchannelUsers = gqBnet.getChannelNameList()\n\t\t\n\t\tfor key in potentialBots.keys():\n\t\t\tif not potentialBots[key][0].lower() in channelUsers:\n\t\t\t\tdel potentialBots[key]\n\t\t\n\t\t# remove bots that have been used too recently\n\t\tfor key in potentialBots.keys():\n\t\t\tif gettime() - gqBotTime.get(key, 0) < 10000:\n\t\t\t\tdel potentialBots[key]\n\t\t\n\t\tif gqGamelist:\n\t\t\t# now, delete remaining bots that have a game in gamelist\n\t\t\tpdb.execute(\"SELECT gamename, botid FROM gamelist\");\n\t\t\t\n\t\t\tresult_set = pdb.getCursor().fetchall()\n\t\t\t\n\t\t\tfor row in result_set:\n\t\t\t\tbotid = int(row[1])\n\t\t\t\t\n\t\t\t\tif row[0] != \"\" and botid in potentialBots.keys():\n\t\t\t\t\tdel potentialBots[botid]\n\t\t\t\n\t\tif len(potentialBots) > 0:\n\t\t\tfirstEntry = hostQueue.popleft()\n\t\t\tusername = firstEntry[0]\n\t\t\tcommand = firstEntry[1] # either pub or priv\n\t\t\tmaptype = firstEntry[2]\n\t\t\tmapname = firstEntry[3]\n\t\t\tgamename = firstEntry[4]\n\t\t\t\n\t\t\tif gqGamelist:\n\t\t\t\t# make sure user doesn't already have game\n\t\t\t\tpdb.execute(\"SELECT COUNT(*) FROM gamelist WHERE ownername = %s OR creatorname = %s\", (username.lower(), username.lower()))\n\t\t\t\trow = pdb.getCursor().fetchone()\n\t\t\t\t\n\t\t\t\tif row[0] > 0:\n\t\t\t\t\tgqBnet.queueChatCommand(\"/w \" + username + \" You already have a game in lobby!\")\n\t\t\t\t\treturn\n\n\t\t\t# select a bot at random from the remaining list\n\t\t\trandIndex = random.choice(potentialBots.keys())\n\t\t\tbotName = potentialBots[randIndex][0]\n\t\t\tbotTrigger = potentialBots[randIndex][1]\n\t\t\t\n\t\t\t# update the time that this bot was used\n\t\t\tgqBotTime[randIndex] = gettime()\n\t\t\t\n\t\t\tif command == \"priv\" and not gqGamelist:\n\t\t\t\tcommand = \"pub\"\n\t\t\t\n\t\t\ttargetString = command + \"by \" + username + \" \" + gamename\n\t\t\t\n\t\t\tgqBnet.queueChatCommand(\"/w \" + botName + \" \" + botTrigger + maptype + \" \" + mapname) # !load or !map the map\n\t\t\tgqBnet.queueChatCommand(\"/w \" + botName + \" \" + botTrigger + targetString)\n\t\t\tgqBnet.queueChatCommand(\"/w \" + username + \" Your game [\" + gamename + \"] should now be hosted on [\" + botName + \"]!\")\n\n\ndef onCommand(bnet, user, command, payload, nType):\n\tglobal gqBnet\n\tgqBnet = bnet\n\twhisper = nType == 1\n\n\tlowername = user.getName().lower()\n\t\n\tif user.getAccess() >= gqAccess:\n\t\tif command == \"priv\" or command == \"pub\":\n\t\t\tif lowername in userMaps.keys():\n\t\t\t\tmapinfo = userMaps[lowername]\n\t\t\t\tgamename = payload\n\t\t\t\t\n\t\t\t\t# make sure this user hasn't hosted already\n\t\t\t\tduplicate = False\n\t\t\t\tfor entry in hostQueue:\n\t\t\t\t\tif entry[0] == lowername:\n\t\t\t\t\t\tduplicate = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tif not duplicate:\n\t\t\t\t\thostQueue.append((lowername, command, mapinfo[0], mapinfo[1], gamename,))\n\t\t\t\t\tbnet.queueChatCommand(\"Your game has been queued (your position: \" + str(len(hostQueue)) + \")\", user.getName(), whisper)\n\t\t\t\telse:\n\t\t\t\t\tbnet.queueChatCommand(\"Error: you have a game in queue already; use !unhost to unqueue that game first\", user.getName(), whisper)\n\t\t\telse:\n\t\t\t\tbnet.queueChatCommand(\"Error: you do not have any map file loaded!\", user.getName(), whisper)\n\t\telif command == \"unhost\":\n\t\t\tfoundEntry = 0\n\t\t\tfor entry in hostQueue:\n\t\t\t\tif entry[0] == lowername:\n\t\t\t\t\tfoundEntry = entry\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tif foundEntry != 
0:\n\t\t\t\thostQueue.remove(foundEntry)\n\t\telif command == \"map\" or command == \"load\":\n\t\t\tif payload != \"\":\n\t\t\t\tpayload = payload.lower() # case insensitive search\n\t\t\t\tlastMatch = \"\"\n\t\t\t\tfoundMatches = \"\"\n\t\t\t\tcountMatches = 0\n\t\t\t\t\n\t\t\t\ttargetList = mapList\n\t\t\t\tif command == \"load\":\n\t\t\t\t\ttargetList = cfgList\n\t\t\t\t\n\t\t\t\tfor fname in targetList:\n\t\t\t\t\tfname_lower = fname.lower()\n\t\t\t\t\t\n\t\t\t\t\t# extract stem for exact stem match\n\t\t\t\t\tif fname.find(\".\") != -1:\n\t\t\t\t\t\tstem = fname_lower.rsplit(\".\", 1)[1]\n\t\t\t\t\t\t\n\t\t\t\t\t\tif payload == fname_lower or payload == stem:\n\t\t\t\t\t\t\tcountMatches = 1\n\t\t\t\t\t\t\tlastMatch = fname\n\t\t\t\t\t\t\tbreak # stop iterating if we have an exact match\n\t\t\t\t\t\telif payload in fname_lower:\n\t\t\t\t\t\t\tcountMatches = countMatches + 1\n\t\t\t\t\t\t\tlastMatch = fname\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif foundMatches == \"\":\n\t\t\t\t\t\t\t\tfoundMatches = fname\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfoundMatches += \", \" + fname\n\t\t\t\t\n\t\t\t\tif countMatches == 0:\n\t\t\t\t\tif command == \"map\":\n\t\t\t\t\t\tbnet.queueChatCommand(\"No maps found with that name.\", user.getName(), whisper)\n\t\t\t\t\telse:\n\t\t\t\t\t\tbnet.queueChatCommand(\"No map configuration found with that name. Use !map for normal map files.\", user.getName(), whisper)\n\t\t\t\telif countMatches == 1:\n\t\t\t\t\tbnet.queueChatCommand(\"Loading map file [\" + lastMatch + \"].\", user.getName(), whisper)\n\t\t\t\t\tuserMaps[lowername] = (command, lastMatch,)\n\t\t\t\telse:\n\t\t\t\t\tbnet.queueChatCommand(\"Maps found: \" + foundMatches, user.getName(), whisper)\n\t\t\telse:\n\t\t\t\tif lowername in userMaps.keys():\n\t\t\t\t\tbnet.queueChatCommand(\"Your currently loaded map file is [\" + userMaps[lowername][1] + \"].\", user.getName(), whisper)\n\t\t\t\telse:\n\t\t\t\t\tbnet.queueChatCommand(\"You currently do not have any map file loaded.\", user.getName(), whisper)\n\t\telif command == \"gamequeue\" and payload == \"pos\":\n\t\t\t# find user's position in the hosting queue\n\t\t\tcounter = 0\n\t\t\tfound = 0\n\t\t\t\n\t\t\tfor entry in hostQueue:\n\t\t\t\tcounter = counter + 1\n\t\t\t\t\n\t\t\t\tif lowername == entry[0]:\n\t\t\t\t\tbnet.queueChatCommand(\"Your position in queue: \" + str(counter), user.getName(), whisper)\n\t\t\t\t\tfound = 1\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tif found == 0:\n\t\t\t\tbnet.queueChatCommand(\"You were not found in the queue.\", user.getName(), whisper)\n\t\telif command == \"gamequeue\" and payload == \"refresh\":\n\t\t\trefreshMaps()\n\t\t\tbnet.queueChatCommand(\"Refreshed internal maps list\", user.getName(), whisper)\n\t\telif user.getAccess() == 10 and command == \"gamequeue\" and payload == \"print\":\n\t\t\tprint(\"[GAMEQUEUE] Printing loaded maps\")\n\t\t\tfor fname in mapList:\n\t\t\t\tprint(\"[GAMEQUEUE] \" + fname)\n\t\t\t\n\t\t\tprint(\"[GAMEQUEUE] Printing loaded map configs\")\n\t\t\tfor fname in cfgList:\n\t\t\t\tprint(\"[GAMEQUEUE] \" + fname)\n\t\telif user.getAccess() == 10 and command == \"gamequeue\" and payload == \"queue\":\n\t\t\tprint(\"[GAMEQUEUE] Printing queue\")\n\t\t\t\n\t\t\tfor entry in hostQueue:\n\t\t\t\tprint(\"[GAMEQUEUE] \" + entry[0] + \" \" + entry[1] + \" \" + entry[2] + \" \" + entry[3] + \" \" + entry[4])\n\ndef gettime():\n\treturn int(round(time.time() * 1000))\n", "sub_path": "plugins/pychop/gamequeue.py", "file_name": "gamequeue.py", "file_ext": "py", "file_size_in_byte": 8093, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 52, "usage_type": "call"}, {"api_name": "host.registerHandler", "line_number": 63, "usage_type": "call"}, {"api_name": "host.registerHandler", "line_number": 64, "usage_type": "call"}, {"api_name": "plugindb.PluginDB", "line_number": 67, "usage_type": "call"}, {"api_name": "host.unregisterHandler", "line_number": 73, "usage_type": "call"}, {"api_name": "host.unregisterHandler", "line_number": 74, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 80, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 81, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "429546167", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom seleshot import create\n \ndriver = webdriver.Firefox() \ndriver.get('http://127.0.0.1:8000/webApp/')\nlinkControls = driver.find_elements_by_tag_name('a')\nlinks = []\n\nfor linkControl in linkControls:\n links.append(linkControl.get_attribute('href'))\n\ndriver.close()\ns = create()\ns.get_screen(url='http://127.0.0.1:8000/webApp/')\n\nfor link in links:\n s.get_screen(url = link)\n \ns.close()", "sub_path": "src/test/seleniumTest.py", "file_name": "seleniumTest.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 7, "usage_type": "name"}, {"api_name": "seleshot.create", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "197559246", "text": "# ----------------------------------------------------------------------------\n# Copyright 2016 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\nimport numpy as np\nimport pytest\n\nimport ngraph as ng\nfrom ngraph.util.utils import executor\nfrom ngraph.util.utils import RandomTensorGenerator\nfrom ngraph.op_graph.axes import spatial_axis\nfrom ngraph.frontends.neon import ax, ar\nfrom neon import NervanaObject\nfrom neon.backends import gen_backend\nfrom neon.layers.layer import Convolution\n\nrng = RandomTensorGenerator(0, np.float32)\n\n\nNervanaObject.be = gen_backend()\n\n\nclass DummyDeltaBuffers(object):\n \"\"\"\n Dummy class for delta buffers needed by neon\n \"\"\"\n def __init__(self):\n self.buffers = [None]\n\n\ndef test_wrong_filters_shape_length():\n \"\"\"\n test wrong filters shape length\n \"\"\"\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, 
ax.S])\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n ng.convolution(conv_params, inputs, filters, {})\n assert str(exinfo.value) == 'convolution filter shape must be length 5, found {}'\\\n .format(len(ax_f))\n\n\ndef test_wrong_input_shape_length():\n \"\"\"\n test wrong input shape length\n \"\"\"\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n ng.convolution(conv_params, inputs, filters, {})\n assert str(exinfo.value) == 'convolution input shape must be length 5, found {}'\\\n .format(len(ax_i))\n\n\ndef test_first_axes_not_same():\n \"\"\"\n test first axes are not the same\n \"\"\"\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n\n ax_i = ng.make_axes([ax.D, ax.C, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n ng.convolution(conv_params, inputs, filters, {})\n assert str(exinfo.value) == 'the first axis in input {inputs} and filter {filters} ' \\\n 'are not the same.'.format(\n inputs=inputs.axes[0],\n filters=filters.axes[0])\n\n\ndef test_wrong_number_of_batch_axes_at_input():\n \"\"\"\n test wrong number of batch axes at input\n \"\"\"\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n\n C = 3\n D = 1\n ax_C = ng.make_axis(length=C, name='C', batch=True)\n ax_D = ng.make_axis(length=D, name='D', batch=True)\n\n ax_i = ng.make_axes([ax_C, ax_D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax_C, ax.T, ax.R, ax.S, ax.K])\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n ng.convolution(conv_params, inputs, filters, {})\n\n assert str(exinfo.value) == \"Input must have one batch axis. 
Found {n_batch_axes} \" \\\n \"batch axes: {batch_axes} Found {n_sample_axes} sample axes: {sample_axes}.\".format(\n n_batch_axes=len(inputs.axes.batch_axes()),\n batch_axes=inputs.axes.batch_axes(),\n n_sample_axes=len(inputs.axes.sample_axes()),\n sample_axes=inputs.axes.sample_axes())\n\n\ndef test_convolution(transformer_factory):\n \"\"\"\n test convolution forward path\n \"\"\"\n N = 128\n C, K = 3, 8\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n ax_o = ng.make_axes([\n ng.make_axis(ax_f.role_axes(ar.Channelout)[0].length, name='C', roles=[ar.Channel]),\n spatial_axis(ax_i, ax_f, padding['pad_d'], strides['str_d'], role=ar.Depth),\n spatial_axis(ax_i, ax_f, padding['pad_h'], strides['str_h'], role=ar.Height),\n spatial_axis(ax_i, ax_f, padding['pad_w'], strides['str_w'], role=ar.Width),\n ax.N\n ])\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n output = ng.convolution(conv_params, inputs, filters, axes=ax_o)\n targets = ng.placeholder(axes=output.axes)\n\n costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)\n error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)\n d_inputs = ng.deriv(error, inputs)\n d_filters = ng.deriv(error, filters)\n\n targets_value = rng.uniform(.1, 0.9, output.axes)\n\n conv_executor = executor([output, error, d_inputs, d_filters], inputs, filters, targets)\n result_ng, err_ng, gradI_ng, gradF_ng = conv_executor(input_value, filter_value, targets_value)\n\n # Now compute reference values via NEON\n NervanaObject.be.bsz = N\n neon_layer = Convolution(fshape=(R, S, K), padding=padding, strides=strides)\n\n inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))\n neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))\n neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)\n neon_layer.configure((C, H, W))\n neon_layer.prev_layer = True\n neon_layer.allocate()\n neon_layer.set_deltas(DummyDeltaBuffers())\n\n result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)\n\n act_result_ne = 1. 
/ (1.0 + np.exp(-result_ne))\n err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))\n gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)\n gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)\n\n # Compare fprop\n np.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)\n\n # Compare bprop\n np.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)\n\n # Compare update\n np.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)\n\n\ndef test_conv_flatten_deriv(transformer_factory):\n \"\"\"\n Test deriv of conv followed by flatten\n \"\"\"\n # set shape\n C, D, H, W, N = (3, 1, 28, 28, 8)\n C, T, R, S, K = (3, 1, 5, 5, 32)\n\n # i, f, o axes\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_o = ng.make_axes([\n ng.make_axis(32, roles=[ar.Channel]),\n ng.make_axis(1, roles=[ar.Depth]),\n ng.make_axis(24, roles=[ar.Height]),\n ng.make_axis(24, roles=[ar.Width]),\n ax.N\n ])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n params = dict(pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1)\n axes_rsck = ng.make_axes([ax.R, ax.S, ax.C, ax.K])\n axes_rsck_prime = ng.make_axes([ng.make_axis(l) for l in axes_rsck.lengths])\n\n # broadcast input / filter axes\n image = ng.constant(np.ones(ax_i.lengths), ax_i)\n filter = ng.variable(axes_rsck_prime,\n initial_value=np.ones((R, S, C, K)))\n filter_casted = ng.cast_axes(filter, axes_rsck)\n filter_casted = ng.expand_dims(filter_casted, ax.T, 0)\n filter_casted = ng.axes_with_order(filter_casted, axes=ax_f)\n\n # convolution\n output = ng.convolution(params, image, filter_casted, axes=ax_o)\n oC, oD, oH, oW, oN = output.axes\n output = ng.axes_with_order(output, axes=ng.make_axes([oN, oD, oH, oW, oC]))\n\n # slice away the oD\n out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]\n conv = ng.Slice(output, out_slicing)\n flatten = ng.flatten_at(conv, idx=1)\n\n # cost and grad\n cost = ng.sum(flatten, reduction_axes=flatten.axes)\n grad = ng.deriv(cost, filter)\n\n # compute\n conv_grad_comp = executor([conv, grad])\n conv_val, grad_val = conv_grad_comp()\n\n assert np.allclose(conv_val, np.zeros_like(conv_val) + 75.)\n assert np.allclose(grad_val, np.zeros_like(grad_val) + 4608.)\n", "sub_path": "tests/test_conv.py", "file_name": "test_conv.py", "file_ext": "py", "file_size_in_byte": 9318, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ngraph.util.utils.RandomTensorGenerator", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "neon.NervanaObject.be", "line_number": 31, "usage_type": "attribute"}, {"api_name": "neon.NervanaObject", "line_number": 31, "usage_type": "name"}, {"api_name": "neon.backends.gen_backend", "line_number": 31, "usage_type": "call"}, {"api_name": "ngraph.make_axes", "line_number": 51, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 51, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.D", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.H", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.W", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 51, 
"usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 52, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 52, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ngraph.placeholder", "line_number": 54, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 57, "usage_type": "call"}, {"api_name": "ngraph.convolution", "line_number": 58, "usage_type": "call"}, {"api_name": "ngraph.make_axes", "line_number": 72, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 72, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.D", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.H", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.W", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 73, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 73, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.K", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ngraph.placeholder", "line_number": 75, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 76, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 78, "usage_type": "call"}, {"api_name": "ngraph.convolution", "line_number": 79, "usage_type": "call"}, {"api_name": "ngraph.make_axes", "line_number": 93, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.D", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 93, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.H", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.W", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 94, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 94, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.K", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ngraph.placeholder", "line_number": 96, 
"usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 97, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 99, "usage_type": "call"}, {"api_name": "ngraph.convolution", "line_number": 100, "usage_type": "call"}, {"api_name": "ngraph.make_axis", "line_number": 118, "usage_type": "call"}, {"api_name": "ngraph.make_axis", "line_number": 119, "usage_type": "call"}, {"api_name": "ngraph.make_axes", "line_number": 121, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.H", "line_number": 121, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 121, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.W", "line_number": 121, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 121, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 122, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 122, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.K", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ngraph.placeholder", "line_number": 124, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 125, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 127, "usage_type": "call"}, {"api_name": "ngraph.convolution", "line_number": 128, "usage_type": "call"}, {"api_name": "ngraph.make_axes", "line_number": 153, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 153, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 153, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.D", "line_number": 153, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.H", "line_number": 153, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.W", "line_number": 153, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 153, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 154, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 154, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 154, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 154, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 154, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 154, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.K", "line_number": 154, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 157, "usage_type": "call"}, {"api_name": "ngraph.make_axis", "line_number": 158, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Channelout", "line_number": 158, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 158, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ar.Channel", "line_number": 158, "usage_type": "attribute"}, {"api_name": "ngraph.op_graph.axes.spatial_axis", "line_number": 159, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Depth", "line_number": 159, "usage_type": "attribute"}, {"api_name": 
"ngraph.frontends.neon.ar", "line_number": 159, "usage_type": "name"}, {"api_name": "ngraph.op_graph.axes.spatial_axis", "line_number": 160, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Height", "line_number": 160, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 160, "usage_type": "name"}, {"api_name": "ngraph.op_graph.axes.spatial_axis", "line_number": 161, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Width", "line_number": 161, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 161, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 162, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 162, "usage_type": "name"}, {"api_name": "ngraph.placeholder", "line_number": 165, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 166, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 175, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 176, "usage_type": "call"}, {"api_name": "ngraph.convolution", "line_number": 178, "usage_type": "call"}, {"api_name": "ngraph.placeholder", "line_number": 179, "usage_type": "call"}, {"api_name": "ngraph.cross_entropy_binary", "line_number": 181, "usage_type": "call"}, {"api_name": "ngraph.sigmoid", "line_number": 181, "usage_type": "call"}, {"api_name": "ngraph.sum", "line_number": 182, "usage_type": "call"}, {"api_name": "ngraph.batch_size", "line_number": 182, "usage_type": "call"}, {"api_name": "ngraph.deriv", "line_number": 183, "usage_type": "call"}, {"api_name": "ngraph.deriv", "line_number": 184, "usage_type": "call"}, {"api_name": "ngraph.util.utils.executor", "line_number": 188, "usage_type": "call"}, {"api_name": "neon.NervanaObject.be", "line_number": 192, "usage_type": "attribute"}, {"api_name": "neon.NervanaObject", "line_number": 192, "usage_type": "name"}, {"api_name": "neon.layers.layer.Convolution", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 214, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 217, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 229, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 229, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.D", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.H", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.W", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 230, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 230, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 230, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 230, 
"usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 230, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 230, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.K", "line_number": 230, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 231, "usage_type": "call"}, {"api_name": "ngraph.make_axis", "line_number": 232, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Channel", "line_number": 232, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 232, "usage_type": "name"}, {"api_name": "ngraph.make_axis", "line_number": 233, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Depth", "line_number": 233, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 233, "usage_type": "name"}, {"api_name": "ngraph.make_axis", "line_number": 234, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Height", "line_number": 234, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 234, "usage_type": "name"}, {"api_name": "ngraph.make_axis", "line_number": 235, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ar.Width", "line_number": 235, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ar", "line_number": 235, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.N", "line_number": 236, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 236, "usage_type": "name"}, {"api_name": "ngraph.make_axes", "line_number": 241, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.R", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 241, "usage_type": "name"}, {"api_name": "ngraph.frontends.neon.ax.S", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.C", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax.K", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ngraph.make_axes", "line_number": 242, "usage_type": "call"}, {"api_name": "ngraph.make_axis", "line_number": 242, "usage_type": "call"}, {"api_name": "ngraph.constant", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 245, "usage_type": "call"}, {"api_name": "ngraph.variable", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 247, "usage_type": "call"}, {"api_name": "ngraph.cast_axes", "line_number": 248, "usage_type": "call"}, {"api_name": "ngraph.expand_dims", "line_number": 249, "usage_type": "call"}, {"api_name": "ngraph.frontends.neon.ax.T", "line_number": 249, "usage_type": "attribute"}, {"api_name": "ngraph.frontends.neon.ax", "line_number": 249, "usage_type": "name"}, {"api_name": "ngraph.axes_with_order", "line_number": 250, "usage_type": "call"}, {"api_name": "ngraph.convolution", "line_number": 253, "usage_type": "call"}, {"api_name": "ngraph.axes_with_order", "line_number": 255, "usage_type": "call"}, {"api_name": "ngraph.make_axes", "line_number": 255, "usage_type": "call"}, {"api_name": "ngraph.Slice", "line_number": 259, "usage_type": "call"}, {"api_name": "ngraph.flatten_at", "line_number": 260, "usage_type": "call"}, {"api_name": "ngraph.sum", "line_number": 263, "usage_type": "call"}, {"api_name": "ngraph.deriv", "line_number": 264, "usage_type": "call"}, {"api_name": "ngraph.util.utils.executor", "line_number": 267, 
"usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "134237263", "text": "# -*- coding: utf-8 -*-\n# @File : main.py\n# @AUTH : swxs\n# @Time : 2019/2/11 9:33\n\nimport re\nimport time\nimport requests\nimport asyncio\nimport pandas as pd\nfrom Helpers.AioHelpers.Aiohelper_http import get\nfrom Helpers.AioHelpers.Aiohttp_file import write\n\nSEARCH_URL = \"http://www.ip138.com:8080/search.asp\"\nsearch_location = r'<TD width=\"(.*)\" align=\"center\">卡号归属地</TD><td align=\"center\" class=tdc2><!-- <td></td> -->(?P<position>.*)</TD>'\n\nINFO = []\n\n\nasync def get_location(i):\n data = {\n \"mobile\": f\"139{i:04}0080\",\n \"action\": \"mobile\"\n }\n response = await get(SEARCH_URL, data)\n try:\n return re.search(search_location, response)[\"position\"]\n except:\n return \"Unknown.\"\n\n\nasync def main(i):\n position = await get_location(i)\n INFO.append((i, position))\n\n\ndef save_info():\n data = {\n \"number\": [],\n \"position\": []\n }\n for info in INFO:\n data[\"number\"].append(info[0])\n data[\"position\"].append(info[1])\n df = pd.DataFrame(data)\n df.to_csv(f\"position.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n try:\n for j in range(1000):\n time.sleep(5)\n # asyncio.ensure_future定义task对象,以在run_until_complete方法中运行\n tasks = [asyncio.ensure_future(main(i)) for i in range(j * 10, (j + 1) * 10)]\n # asyncio.get_event_loop()创建事件循环loop\n loop = asyncio.get_event_loop()\n # asyncio.wait的参数必须是task对象组成的列表,表示执行多次请求,使用方法和gevent相似\n loop.run_until_complete(asyncio.wait(tasks))\n # loop.run_until_complete(asyncio.gather(*tasks))\n except:\n pass\n finally:\n save_info()\n", "sub_path": "store/Python/mine/tools_search_phone_location/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "Helpers.AioHelpers.Aiohelper_http.get", "line_number": 25, "usage_type": "call"}, {"api_name": "re.search", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 54, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 56, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "201540953", "text": "import typing\n\nimport numpy as np\nimport torch\nimport torch.distributions\n\nimport mantrap.environment\n\nfrom .acc_interact import InteractionAccelerationModule\n\n\nclass InteractionVelocityModule(InteractionAccelerationModule):\n \"\"\"Loss based on difference of velocities due to interaction between robot and ados.\n\n As a proxy for interaction based on the mean velocity of every ado is computed in a (fictional) scene without an\n ego (robot) and compared to the actual occurring positions in the scene, as in intuitive measure for the change\n the robot's presence introduces to the scene.\n\n Re-Predicting it every time-step would be more correct, however it would also require a lot more computational\n effort (horizon times as much to be exact). 
Therefore merely the behavior of the ado without ego is computed\n that would occur, if the ego is not there from the beginning.\n\n .. math:: objective = 1/T \\\\sum_{T} \\\\sum_{ados} || vel_{t,i} - vel_{t,i}^{wo} ||_2\n\n :param env: solver's environment environment for predicting the behaviour without interaction.\n \"\"\"\n def __init__(self, env: mantrap.environment.base.GraphBasedEnvironment, t_horizon: int, weight: float = 1.0,\n **unused):\n super(InteractionVelocityModule, self).__init__(env=env, t_horizon=t_horizon, weight=weight)\n self._max_value = mantrap.constants.OBJECTIVE_VEL_INTERACT_MAX\n\n def summarize_distribution(self, ego_trajectory: typing.Union[torch.Tensor, None]) -> torch.Tensor:\n \"\"\"Compute ado-wise velocities from velocity distribution dict mean values.\"\"\"\n if ego_trajectory is not None:\n dist_dict = self.env.compute_distributions(ego_trajectory=ego_trajectory, vel_dist=True)\n else:\n dist_dict = self.env.compute_distributions_wo_ego(t_horizon=self.t_horizon)\n\n sample_length = self.env.num_modes * self.t_horizon\n velocities = torch.zeros((self.env.num_ados, sample_length, 2))\n for ado_id, distribution in dist_dict.items():\n m_ado = self.env.index_ado_id(ado_id)\n velocities[m_ado, :, :] = distribution.mean.view(-1, 2)\n return velocities\n\n ###########################################################################\n # Objective Properties ####################################################\n ###########################################################################\n @property\n def name(self) -> str:\n return \"interaction_vel\"\n", "sub_path": "mantrap/modules/baselines/vel_interact.py", "file_name": "vel_interact.py", "file_ext": "py", "file_size_in_byte": 2484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "acc_interact.InteractionAccelerationModule", "line_number": 12, "usage_type": "name"}, {"api_name": "mantrap.environment.environment", "line_number": 27, "usage_type": "attribute"}, {"api_name": "mantrap.environment", "line_number": 27, "usage_type": "name"}, {"api_name": "mantrap.environment.constants", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mantrap.environment", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "418785148", "text": "# Unit tests related to 'Address' (https://www.easypost.com/docs/api#addresses).\n\nimport pytest\n\nimport easypost\n\n\n@pytest.mark.vcr()\ndef test_address_creation_verification():\n # Create an address and then verify some fields to test whether it was created just fine.\n address = easypost.Address.create(\n company=\"EasyPost\",\n street1=\"118 2nd St\",\n street2=\"4th Fl\",\n city=\"San Francisco\",\n state=\"CA\",\n zip=\"94105\",\n phone=\"415-456-7890\",\n )\n address.verify()\n\n address = easypost.Address.retrieve(address.id)\n\n assert address.country == \"US\"\n assert address.email is None\n assert address.federal_tax_id is None\n assert address.state == \"CA\"\n assert address.zip == \"94105\"\n\n\n@pytest.mark.vcr()\ndef test_address_creation_with_verify():\n # Create an address with a verify parameter to test that it verifies accurately\n address = easypost.Address.create(\n verify=[\"delivery\"],\n street1=\"118 2\",\n street2=\"FLoor 4\",\n city=\"San 
Francisco\",\n state=\"CA\",\n zip=\"94105\",\n country=\"US\",\n company=\"EasyPost\",\n phone=\"415-456-7890\",\n )\n\n assert address.id is not None\n assert address.street1 == \"118 2ND ST FL 4\"\n assert address.street2 == \"\"\n assert address.country == \"US\"\n\n\n@pytest.mark.vcr()\ndef test_address_creation_with_verify_bool():\n # Create an address with a verify parameter to test that it verifies accurately\n address = easypost.Address.create(\n verify=True,\n street1=\"118 2\",\n street2=\"FLoor 4\",\n city=\"San Francisco\",\n state=\"CA\",\n zip=\"94105\",\n country=\"US\",\n company=\"EasyPost\",\n phone=\"415-456-7890\",\n )\n\n assert address.id is not None\n assert address.street1 == \"118 2ND ST FL 4\"\n assert address.street2 == \"\"\n assert address.country == \"US\"\n\n\n@pytest.mark.vcr()\ndef test_address_creation_with_verify_failure():\n # Create an address with a verify parameter to test that it fails elegantly\n address = easypost.Address.create(\n verify=[\"delivery\"],\n street1=\"UNDELIEVRABLE ST\",\n city=\"San Francisco\",\n state=\"CA\",\n zip=\"94105\",\n country=\"US\",\n company=\"EasyPost\",\n phone=\"415-456-7890\",\n )\n\n assert address.id is not None\n assert address.street1 == \"UNDELIEVRABLE ST\"\n\n assert address.verifications[\"delivery\"][\"success\"] is False\n\n assert len(address.verifications[\"delivery\"][\"errors\"]) >= 2\n assert address.verifications[\"delivery\"][\"errors\"][0][\"message\"] == \"Address not found\"\n assert address.verifications[\"delivery\"][\"errors\"][1][\"message\"] == \"House number is missing\"\n\n\n@pytest.mark.vcr()\ndef test_address_creation_with_verify_strict_failure():\n # Create an address with a verify strict parameter to test that it fails elegantly\n with pytest.raises(easypost.Error) as caught_exception:\n easypost.Address.create(\n verify_strict=[\"delivery\"],\n street1=\"UNDELIEVRABLE ST\",\n city=\"San Francisco\",\n state=\"CA\",\n zip=\"94105\",\n country=\"US\",\n company=\"EasyPost\",\n phone=\"415-456-7890\",\n )\n\n exception = caught_exception.value.json_body\n\n assert exception[\"error\"][\"code\"] == \"ADDRESS.VERIFY.FAILURE\"\n assert exception[\"error\"][\"message\"] == \"Unable to verify address.\"\n\n assert len(exception[\"error\"][\"errors\"]) >= 2\n assert exception[\"error\"][\"errors\"][0][\"message\"] == \"Address not found\"\n assert exception[\"error\"][\"errors\"][1][\"message\"] == \"House number is missing\"\n\n\n@pytest.mark.vcr()\ndef test_address_unicode():\n # Create an address with unicode field and assert if it was created correctly.\n state = u\"DELEGACI\\xf3N BENITO JU\\xe1REZ\"\n\n address = easypost.Address.create(state=state)\n assert address.state == state\n\n\n@pytest.mark.vcr()\ndef test_address_bytestring():\n # Create an address with a bytestring field and assert if it was created correctly.\n state = u\"DELEGACI\\xf3N BENITO JU\\xe1REZ\"\n\n address = easypost.Address.create(state=state.encode(\"utf-8\"))\n assert address.state == state\n", "sub_path": "tests/test_address.py", "file_name": "test_address.py", "file_ext": "py", "file_size_in_byte": 4188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "easypost.Address.create", "line_number": 11, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 11, "usage_type": "attribute"}, {"api_name": "easypost.Address.retrieve", "line_number": 22, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 22, 
"usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 8, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}, {"api_name": "easypost.Address.create", "line_number": 34, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 31, "usage_type": "attribute"}, {"api_name": "easypost.Address.create", "line_number": 55, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 52, "usage_type": "attribute"}, {"api_name": "easypost.Address.create", "line_number": 76, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 73, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 100, "usage_type": "call"}, {"api_name": "easypost.Error", "line_number": 100, "usage_type": "attribute"}, {"api_name": "easypost.Address.create", "line_number": 101, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 97, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 97, "usage_type": "attribute"}, {"api_name": "easypost.Address.create", "line_number": 127, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 122, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 122, "usage_type": "attribute"}, {"api_name": "easypost.Address.create", "line_number": 136, "usage_type": "call"}, {"api_name": "easypost.Address", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pytest.mark.vcr", "line_number": 131, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": "333070915", "text": "from spacy.tokens import Span\n\nfrom respacy.phrases import PhraseDetector\n\n\ndef test_np_simple(nlp):\n text = \"a simple noun phrase and a second noun phrase.\"\n doc = PhraseDetector(nlp.vocab)(nlp(text))\n assert len(doc._.phrases) == 2\n assert doc._.phrases[0] == (\"NP\", Span(doc, 0, 4))\n assert doc._.phrases[1] == (\"NP\", Span(doc, 5, 9))\n\n\ndef test_np_complex(nlp):\n text = \"this is the long and unexpectedly complex noun phrase.\"\n doc = PhraseDetector(nlp.vocab)(nlp(text))\n assert len(doc._.phrases) == 1\n assert doc._.phrases[0] == (\"NP\", Span(doc, 2, 9))\n\n\ndef test_vp_simple(nlp):\n text = \"this was created obviously simple.\"\n doc = PhraseDetector(nlp.vocab)(nlp(text))\n assert len(doc._.phrases) == 1\n assert doc._.phrases[0] == (\"VP\", Span(doc, 1, 4))\n\n\ndef test_vp_complex(nlp):\n text = \"I have been deeply trying to find.\"\n doc = PhraseDetector(nlp.vocab)(nlp(text))\n assert len(doc._.phrases) == 1\n assert doc._.phrases[0] == (\"VP\", Span(doc, 1, 5))\n", "sub_path": "tests/test_phrases.py", "file_name": "test_phrases.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "respacy.phrases.PhraseDetector", "line_number": 8, "usage_type": 
"call"}, {"api_name": "spacy.tokens.Span", "line_number": 10, "usage_type": "call"}, {"api_name": "spacy.tokens.Span", "line_number": 11, "usage_type": "call"}, {"api_name": "respacy.phrases.PhraseDetector", "line_number": 16, "usage_type": "call"}, {"api_name": "spacy.tokens.Span", "line_number": 18, "usage_type": "call"}, {"api_name": "respacy.phrases.PhraseDetector", "line_number": 23, "usage_type": "call"}, {"api_name": "spacy.tokens.Span", "line_number": 25, "usage_type": "call"}, {"api_name": "respacy.phrases.PhraseDetector", "line_number": 30, "usage_type": "call"}, {"api_name": "spacy.tokens.Span", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "15417024", "text": "import warnings\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport yaml\nfrom astropy.convolution import convolve\nfrom scipy import ndimage\nfrom scipy.signal import medfilt, medfilt2d\n\nfrom .modelling import ModelFit\n\n\ndef _check_convolve_dims(data, half_size: [None, Tuple[int, None]] = None):\n \"\"\"Check the kernel sizes to be used in various convolution-like operations.\n If the kernel sizes are too big, replace them with the largest allowable size\n and issue a warning to the user.\n\n .. note:: ripped from here: https://github.com/HERA-Team/hera_qm/blob/master/hera_qm/xrfi.py\n\n Parameters\n ----------\n data : array\n 1- or 2-D array that will undergo convolution-like operations.\n half_size : tuple\n Tuple of ints or None's with length ``data.ndim``. They represent the half-size\n of the kernel to be used (or, rather the kernel will be 2*half_size+1 in each\n dimension). None uses half_size=data.shape.\n\n Returns\n -------\n size : tuple\n The kernel size in each dimension.\n\n Raises\n ------\n ValueError:\n If half_size does not match the number of dimensions.\n \"\"\"\n if half_size is None:\n half_size = (None,) * data.ndim\n\n if len(half_size) != data.ndim:\n raise ValueError(\n \"Number of kernel dimensions does not match number of data dimensions.\"\n )\n\n out = []\n for data_shape, hsize in zip(data.shape, half_size):\n if hsize is None or hsize > data_shape:\n out.append(data_shape)\n elif hsize < 0:\n out.append(0)\n else:\n out.append(hsize)\n\n return tuple(out)\n\n\ndef robust_divide(num, den):\n \"\"\"Prevent division by zero.\n This function will compute division between two array-like objects by setting\n values to infinity when the denominator is small for the given data type. This\n avoids floating point exception warnings that may hide genuine problems\n in the data.\n Parameters\n ----------\n num : array\n The numerator.\n den : array\n The denominator.\n Returns\n -------\n out : array\n The result of dividing num / den. Elements where b is small (or zero) are set\n to infinity.\n \"\"\"\n thresh = np.finfo(den.dtype).eps\n\n den_mask = np.abs(den) > thresh\n\n out = np.true_divide(num, den, where=den_mask)\n out[~den_mask] = np.inf\n\n # If numerator is also small, set to zero (better for smooth stuff)\n out[~den_mask & (np.abs(num) <= thresh)] = 0\n return out\n\n\ndef flagged_filter(\n data: np.ndarray,\n size: [int, Tuple[int]],\n kind: str = \"median\",\n flags: [None, np.ndarray] = None,\n mode: [None, str] = None,\n interp_flagged=True,\n **kwargs,\n):\n \"\"\"\n Perform an n-dimensional filter operation on optionally flagged data.\n\n Parameters\n ----------\n data : np.ndarray\n The data to filter. Can be of arbitrary dimension.\n size : int or tuple\n The size of the filtering convolution kernel. 
If tuple, one entry per dimension\n        in `data`.\n    kind : str, optional\n        The function to apply in each window. Typical options are `mean` and `median`.\n        For this function to work, the function kind chosen here must have a corresponding\n        ``nan<function>`` implementation in numpy.\n    flags : np.ndarray, optional\n        A boolean array specifying data to omit from the filtering.\n    mode : str, optional\n        The mode of the filter. See ``scipy.ndimage.generic_filter`` for details. By default,\n        'nearest' if size < data.size, otherwise 'reflect'.\n    interp_flagged : bool, optional\n        Whether to fill in flagged entries with their filtered values. Otherwise,\n        flagged entries are set to their original value.\n    kwargs :\n        Other options to pass to the generic filter function.\n\n    Returns\n    -------\n    np.ndarray :\n        The filtered array, of the same shape and type as ``data``.\n\n    Notes\n    -----\n    This function can typically be used to implement a flagged median filter. It does\n    have some limitations in this regard, which we will now describe.\n\n    It would be expected that a perfectly smooth\n    monotonic function, after median filtering, should remain identical to the input.\n    This is only the case for the default 'nearest' mode. For the alternative 'reflect'\n    mode, the edge-data will be corrupted from the input. On the other hand, it may be\n    expected that if the kernel width is equal to or larger than the data size, that\n    the operation is merely to perform a full collapse over that dimension. This is the\n    case only for mode 'reflect', while again mode 'nearest' will continue to yield (a\n    very slow) identity operation. By default, the mode will be set to 'reflect' if\n    the size is >= the data size, with an emitted warning.\n\n    Furthermore, a median filter is *not* an identity operation, even on monotonic\n    functions, for an even-sized kernel (in this case it's the average of the two\n    central values).\n\n    Also, even for an odd-sized kernel, if using flags, some of the windows will contain\n    an even number of usable data, in which case the data surrounding the flag will not\n    be identical to the input.\n\n    Finally, flags near the edges can have strange behaviour, depending on the mode.\n    \"\"\"\n    if mode is None:\n        if (isinstance(size, int) and size >= min(data.shape)) or (\n            isinstance(size, tuple) and any(s > d for s, d in zip(size, data.shape))\n        ):\n            warnings.warn(\n                \"Setting default mode to reflect because a large size was set.\"\n            )\n            mode = \"reflect\"\n        else:\n            mode = \"nearest\"\n\n    if flags is not None and np.any(flags):\n        fnc = getattr(np, \"nan\" + kind)\n        assert flags.shape == data.shape\n        orig_flagged_data = data[flags].copy()\n        data[flags] = np.nan\n        filtered = ndimage.generic_filter(data, fnc, size=size, mode=mode, **kwargs)\n        if not interp_flagged:\n            filtered[flags] = orig_flagged_data\n        data[flags] = orig_flagged_data\n\n    else:\n        if kind == \"mean\":\n            kind = \"uniform\"\n        filtered = getattr(ndimage, kind + \"_filter\")(\n            data, size=size, mode=mode, **kwargs\n        )\n\n    return filtered\n\n\ndef detrend_medfilt(\n    data: np.ndarray,\n    flags: [None, np.ndarray] = None,\n    half_size: [None, Tuple[int, None]] = None,\n):\n    \"\"\"Detrend array using a median filter.\n\n    .. note:: ripped from here: https://github.com/HERA-Team/hera_qm/blob/master/hera_qm/xrfi.py\n\n    Parameters\n    ----------\n    data : array\n        Data to detrend. Can be an array of any number of dimensions.\n    flags : boolean array, optional\n        Flags specifying data to ignore in the detrend. 
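A minimal usage sketch of ``flagged_filter`` as defined above; the data and flags here are synthetic, purely for illustration:

import numpy as np

data = np.linspace(0.0, 10.0, 101)
flags = np.zeros(data.shape, dtype=bool)
data[50] += 100.0   # inject a spike ...
flags[50] = True    # ... and pre-flag it

# Median filter with a 5-sample window; the flagged spike is ignored in
# every window and replaced by its filtered value (the default
# interp_flagged=True).
smooth = flagged_filter(data, size=5, kind="median", flags=flags)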
If not given, don't ignore\n anything.\n half_size : tuple of int/None\n The half-size of the kernel to convolve (kernel size will be 2*half_size+1).\n Value of zero (for any dimension) omits that axis from the kernel, effectively\n applying the detrending for each subarray along that axis. Value of None will\n effectively (but slowly) perform a median along the entire axis before running\n the kernel over the other axis.\n\n Returns\n -------\n out : array\n An array containing the outlier significance metric. Same type and size as `data`.\n\n Notes\n -----\n This detrending is very good for data with large RFI compared to the noise, but also\n reasonably large noise compared to the spectrum steepness. If the noise is small\n compared to the steepness of the spectrum, individual windows can become *almost always*\n monotonic, in which case the randomly non-monotonic bins \"stick out\" and get wrongly\n flagged. This can be helped three ways:\n 1) Use a smaller bin width. This helps by reducing the probability that a bin will\n be randomly non-monotonic. However it also loses signal-to-noise on the RFI.\n 2) Pre-fit a smooth model that \"flattens\" the spectrum. This helps by reducing the\n probability that bins will be monotonic (higher noise level wrt steepness). It\n has the disadvantage that fitted models can be wrong when there's RFI there.\n 3) Follow the medfilt with a meanfilt: if the medfilt is able to flag most/all of\n the RFI, then a following meanfilt will tend to \"unfilter\" the wrongly flagged\n parts.\n\n \"\"\"\n half_size = _check_convolve_dims(data, half_size)\n size = tuple(2 * s + 1 for s in half_size)\n\n d_sm = flagged_filter(data, size=size, kind=\"median\", flags=flags)\n d_rs = data - d_sm\n d_sq = d_rs ** 2\n\n # Remember that d_sq will be zero for any window in which the data is monotonic (but\n # could also be zero for non-monotonic windows where the two halves of the window\n # are self-contained). Most smooth functions will be monotonic in small enough\n # windows. If noise is of low-enough amplitude wrt the steepness of the smooth\n # underlying function, there is a good chance the resulting data will also be\n # monotonic. Nevertheless, any RFI that is large enough will cause the value of\n # that channel to *not* be the central value, and it will have d_sq > 0.\n\n # Factor of .456 is to put mod-z scores on same scale as standard deviation.\n sig = np.sqrt(flagged_filter(d_sq, size=size, kind=\"median\", flags=flags) / 0.456)\n\n # don't divide by zero, instead turn those entries into +inf\n return robust_divide(d_rs, sig)\n\n\ndef detrend_meanfilt(\n data: np.ndarray,\n flags: [None, np.ndarray] = None,\n half_size: [None, Tuple[int, None]] = None,\n):\n \"\"\"Detrend array using a mean filter.\n\n Parameters\n ----------\n data : array\n Data to detrend. Can be an array of any number of dimensions.\n flags : boolean array, optional\n Flags specifying data to ignore in the detrend. If not given, don't ignore\n anything.\n half_size : tuple of int/None\n The half-size of the kernel to convolve (kernel size will be 2*half_size+1).\n Value of zero (for any dimension) omits that axis from the kernel, effectively\n applying the detrending for each subarray along that axis. Value of None will\n effectively (but slowly) perform a median along the entire axis before running\n the kernel over the other axis.\n\n Returns\n -------\n out : array\n An array containing the outlier significance metric. 
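The 0.456 factor used by both detrenders can be sanity-checked numerically: for zero-mean Gaussian noise, the median of the squared values is roughly 0.456 times the variance. This check is illustrative only and is not part of the module:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1_000_000)  # unit-variance Gaussian noise
print(np.median(x ** 2))        # ~0.455, i.e. the 0.456 scaling factor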
Same type and size as `data`.\n\n    Notes\n    -----\n    This detrending is very good for data that has most of the RFI flagged already, but\n    will perform very poorly when un-flagged RFI still exists. It is often useful to\n    precede this with a median filter.\n    \"\"\"\n\n    half_size = _check_convolve_dims(data, half_size)\n    size = tuple(2 * s + 1 for s in half_size)\n\n    d_sm = flagged_filter(data, size=size, kind=\"mean\", flags=flags)\n    d_rs = data - d_sm\n    d_sq = d_rs ** 2\n\n    # Factor of .456 is to put mod-z scores on same scale as standard deviation.\n    sig = np.sqrt(flagged_filter(d_sq, size=size, kind=\"mean\", flags=flags))\n\n    # don't divide by zero, instead turn those entries into +inf\n    return robust_divide(d_rs, sig)\n\n\ndef xrfi_medfilt(\n    spectrum: np.ndarray,\n    threshold: float = 6,\n    flags: [None, np.ndarray] = None,\n    kf: [int, None] = 8,\n    kt: [int, None] = 8,\n    inplace: bool = True,\n    max_iter: int = 1,\n    poly_order=0,\n    accumulate=False,\n    use_meanfilt=True,\n):\n    \"\"\"Generate RFI flags for a given spectrum using a median filter.\n\n    Parameters\n    ----------\n    spectrum : array-like\n        Either a 1D array of shape ``(NFREQS,)`` or a 2D array of shape\n        ``(NTIMES, NFREQS)`` defining the measured raw spectrum.\n        If 2D, a 2D filter in freq*time will be applied by default. One can perform\n        the filter just over frequency (in the case that `NTIMES > 1`) by setting\n        `kt=0`.\n    threshold : float, optional\n        Number of effective sigma at which to clip RFI.\n    flags : array-like, optional\n        Boolean array of pre-existing flagged data to ignore in the filtering.\n    kt, kf : tuple of int/None\n        The half-size of the kernel to convolve (e.g. the kernel size over frequency\n        will be ``2*kf+1``).\n        Value of zero (for any dimension) omits that axis from the kernel, effectively\n        applying the detrending for each subarray along that axis. Value of None will\n        effectively (but slowly) perform a median along the entire axis before running\n        the kernel over the other axis.\n    inplace : bool, optional\n        If True, and flags are given, update the flags in-place instead of creating a\n        new array.\n    max_iter : int, optional\n        Maximum number of iterations to perform. Each iteration uses the flags of the\n        previous iteration to achieve a more robust estimate of the flags. Multiple\n        iterations are more useful if ``poly_order > 0``.\n    poly_order : int, optional\n        If greater than 0, fits a polynomial to the spectrum before performing\n        the median filter. Only allowed if spectrum is 1D. This is useful for getting\n        the number of false positives down. If max_iter>1, the polynomial will be refit\n        on each iteration (using new flags).\n    accumulate : bool, optional\n        If True, on each iteration, accumulate flags. Otherwise, use only flags from the\n        previous iteration and then forget about them. Recommended to be False.\n    use_meanfilt : bool, optional\n        Whether to apply a mean filter *after* the median filter. The median filter is\n        good at getting RFI, but can also pick up non-RFI if the spectrum is steep\n        compared to the noise. The mean filter is better at only getting RFI if the RFI\n        has already been flagged.\n\n    Returns\n    -------\n    flags : array-like\n        Boolean array of the same shape as ``spectrum`` indicating which channels/times\n        have flagged RFI.\n\n    Notes\n    -----\n    The default combination of using a median filter followed by a mean filter works\n    quite well. 
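A hypothetical 1D call showing the interface described above; the spectrum is synthetic and the parameter values are merely plausible choices, not recommendations:

import numpy as np

freq = np.linspace(50.0, 100.0, 1000)                        # MHz
spectrum = 1e3 * (freq / 75.0) ** -2.5                       # smooth foreground
spectrum += np.random.default_rng(1).normal(size=freq.size)  # noise
spectrum[200] += 500.0                                       # injected RFI

flags, info = xrfi_medfilt(spectrum, threshold=6, kf=8, poly_order=3)
print(flags[200], info["iters"])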
The median filter works quite well at picking up large RFI (wrt to the\n noise level), but can also create false positives if the noise level is small wrt\n the steepness of the slope. Following by a flagged mean filter tends to remove these\n false positives (as it doesn't get pinned to zero when the function is monotonic).\n\n It is unclear whether performing an iterative filtering is very useful unless using\n a polynomial subtraction. With polynomial subtraction, one should likely use at least\n a few iterations, without accumulation, so that the polynomial is not skewed by the\n as-yet-unflagged RFI.\n\n Choice of kernel size can be important. The wider the kernel, the more \"signal-to-noise\"\n one will get on the RFI. Also, if there is a bunch of RFI all clumped together, it will\n definitely be missed by a kernel window of order double the size of the clump or less.\n By increasing the kernel size, these clumps are picked up, but edge-effects become\n more prevalent in this case. One option here would be to iterate over kernel sizes\n (getting smaller), such that very large blobs are first flagged out, then progressively\n finer detail is added. Use ``xrfi_iterative_medfilt`` for that.\n \"\"\"\n iter = 0\n\n if flags is None:\n new_flags = np.zeros(spectrum.shape, dtype=bool)\n else:\n new_flags = flags if inplace else flags.copy()\n\n nflags = -1\n\n nflags_list = []\n resid_list = []\n assert max_iter > 0\n resid = spectrum.copy()\n\n size = (kf,) if spectrum.ndim == 1 else (kt, kf)\n while iter < max_iter and np.sum(new_flags) > nflags:\n nflags = np.sum(new_flags)\n\n if spectrum.ndim == 1 and poly_order:\n # Subtract a smooth polynomial first.\n # The point of this is that steep spectra with only a little bit of noise\n # tend to detrend to exactly zero, but randomly may detrend to something non-zero.\n # In this case, the behaviour is to set the significance to infinity. This is not\n # a problem for data in which the noise is large compared to the signal. We can\n # force this by initially detrending by some flexible polynomial over the whole\n # band. This is not guaranteed to work -- the poly fit itself could over-fit\n # for RFI. Therefore the order of the fit should be low. Its purpose is not to\n # do a \"good fit\" to the data, but rather to get the residuals \"flat enough\" that\n # the median filter works.\n # TODO: the following is pretty limited (why polynomial?) 
but it seems to do\n # reasonably well.\n f = np.linspace(0, 1, len(spectrum))\n resid[~new_flags] = (\n spectrum[~new_flags]\n - ModelFit(\n \"polynomial\",\n f[~new_flags],\n spectrum[~new_flags],\n n_terms=poly_order,\n ).evaluate()\n )\n resid_list.append(resid)\n else:\n resid = spectrum\n\n med_significance = detrend_medfilt(resid, half_size=size, flags=new_flags)\n\n if use_meanfilt:\n medfilt_flags = np.abs(med_significance) > threshold\n significance = detrend_meanfilt(resid, half_size=size, flags=medfilt_flags)\n else:\n significance = med_significance\n\n if accumulate:\n new_flags |= np.abs(significance) > threshold\n else:\n new_flags = np.abs(significance) > threshold\n\n iter += 1\n nflags_list.append(np.sum(new_flags))\n\n if 1 < max_iter == iter and np.sum(new_flags) > nflags:\n warnings.warn(\"Median filter reached max_iter and is still finding new RFI.\")\n\n return (\n new_flags,\n {\n \"significance\": significance,\n \"median_significance\": med_significance,\n \"iters\": iter,\n \"nflags\": nflags_list,\n \"residuals\": resid_list,\n },\n )\n\n\ndef xrfi_iterative_medfilt(\n spectrum: np.ndarray,\n threshold: float = 6,\n flags: [None, np.ndarray] = None,\n min_kf: [int, None] = 8,\n min_kt: [int, None] = 8,\n max_kf: [int, None] = None,\n max_kt: [int, None] = None,\n inplace: bool = True,\n accumulate=False,\n use_meanfilt=True,\n):\n \"\"\"\n An iterative median filter, in which the window size is progressively reduced.\n\n Parameters\n ----------\n spectrum\n threshold\n flags\n kf\n kt\n inplace\n max_iter\n poly_order\n accumulate\n use_meanfilt\n\n Returns\n -------\n\n \"\"\"\n raise NotImplementedError(\"This has not been implemented yet.\")\n\n\ndef xrfi_explicit(f, rfi_file=None, extra_rfi=None):\n \"\"\"\n Excise RFI from given data using a explicitly set list of flag ranges.\n\n Parameters\n ----------\n f : array-like\n Frequencies, in MHz, of the data.\n rfi_file : str, optional\n A YAML file containing the key 'rfi_ranges', which should be a list of 2-tuples\n giving the (min, max) frequency range of known RFI channels (in MHz). By default,\n uses a file included in `edges-analysis` with known RFI channels from the MRO.\n extra_rfi : list, optional\n A list of extra RFI channels (in the format of the `rfi_ranges` from the `rfi_file`).\n\n Returns\n -------\n flags : array-like\n Boolean array of the same shape as ``spectrum`` indicated which channels/times\n have flagged RFI.\n \"\"\"\n\n rfi_freqs = []\n if rfi_file:\n with open(rfi_file, \"r\") as fl:\n rfi_freqs += yaml.load(fl, Loader=yaml.FullLoader)[\"rfi_ranges\"]\n\n if extra_rfi:\n rfi_freqs += extra_rfi\n\n flags = np.zeros(len(f), dtype=bool)\n for low, high in rfi_freqs:\n flags[(f > low) & (f < high)] = True\n\n return flags\n\n\ndef _get_mad(x):\n med = np.median(x)\n # Factor of 0.456 to scale median back to Gaussian std dev.\n return np.median(np.abs(x - med)) / np.sqrt(0.456)\n\n\ndef xrfi_poly_filter(\n spectrum,\n weights=None,\n window_width=100,\n n_poly=4,\n n_bootstrap=20,\n n_sigma=2.5,\n use_median=False,\n flip=False,\n):\n \"\"\"\n Flag RFI by using a moving window and a low-order polynomial to detrend.\n\n This is similar to :func:`xrfi_medfilt`, except that within each sliding window,\n a low-order polynomial is fit, and the std dev of the residuals is used as the\n underlying distribution width at which to clip RFI.\n\n Parameters\n ----------\n spectrum : array-like\n A 1D or 2D array, where the last axis corresponds to frequency. 
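For contrast with the statistical flaggers, `xrfi_explicit` above needs no statistics at all. A minimal call, with a frequency grid and ranges invented for illustration (passing `extra_rfi` directly bypasses the YAML file):

```python
import numpy as np

f = np.linspace(50.0, 200.0, 2048)  # frequency grid in MHz
# extra_rfi takes (low, high) pairs in MHz, same format as the rfi_ranges file entries.
flags = xrfi_explicit(f, rfi_file=None, extra_rfi=[(87.5, 108.0), (137.0, 138.0)])
print(int(flags.sum()))
```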
The data\n measured at those frequencies.\n weights : array-like\n The weights associated with the data (same shape as `spectrum`).\n window_width : int, optional\n The width of the moving window in number of channels.\n n_poly : int, optional\n Number of polynomial terms to fit in each sliding window. Should be significantly\n smaller than ``window_width``.\n n_bootstrap : int, optional\n Number of bootstrap samples to take to estimate the standard deviation of\n the data without RFI.\n n_sigma : float, optional\n The number of sigma at which to threshold RFI.\n use_median : bool, optional\n Instead of using bootstrap for the initial window, use Median Absolute Deviation.\n flip : bool, optional\n Whether to *also* do the analysis backwards, doing a logical OR on the final\n flags.\n\n Returns\n -------\n flags : array-like\n Boolean array of the same shape as ``spectrum`` indicated which channels/times\n have flagged RFI.\n \"\"\"\n nf = spectrum.shape[-1]\n f = np.linspace(-1, 1, window_width)\n flags = np.zeros(spectrum.shape, dtype=bool)\n\n if weights is not None:\n flags |= weights <= 0\n\n class NoDataError(Exception):\n pass\n\n def compute_resid(d, flagged):\n mask = ~flagged\n if np.any(mask):\n par = np.polyfit(f[mask], d[mask], n_poly - 1)\n return d[mask] - np.polyval(par, f[mask]), mask\n else:\n raise NoDataError\n\n # Compute residuals for initial section\n got_init = False\n window = np.arange(window_width)\n while not got_init and window[-1] < nf:\n try:\n r, mask = compute_resid(spectrum[window], flags[window])\n got_init = True\n\n except NoDataError:\n window += 1\n\n if not got_init:\n raise NoDataError(\n \"There were no windows of data with enough data to perform xrfi.\"\n )\n\n # Computation of STD for initial section using the median statistic\n if not use_median:\n r_choice_std = [\n np.std(np.random.choice(r, len(r) // 2)) for _ in range(n_bootstrap)\n ]\n r_std = np.median(r_choice_std)\n else:\n r_std = _get_mad(r)\n\n print(r_std)\n\n # Set this window's flags to true.\n flags[:window_width][mask] |= np.abs(r) > n_sigma * r_std\n\n # Initial window limits\n window += 1\n while window[-1] < nf:\n # Selecting section of data of width \"window_width\"\n try:\n r, fmask = compute_resid(spectrum[window], flags[window])\n except NoDataError:\n continue\n\n flags[window][fmask][np.abs(r) > n_sigma * r_std] = True\n\n # Update std dev. 
estimate for the next window.\n r_std = _get_mad(r) if use_median else np.std(r)\n window += 1\n\n if flip:\n flip_flags = xrfi_poly_filter(\n np.flip(spectrum),\n np.flip(weights) if weights is not None else None,\n window_width=window_width,\n n_poly=n_poly,\n n_bootstrap=n_bootstrap,\n n_sigma=n_sigma,\n use_median=use_median,\n flip=False,\n )\n flags |= np.flip(flip_flags)\n\n return flags\n\n\ndef xrfi_poly(\n spectrum,\n flags=None,\n f_ratio=None,\n f_log=False,\n t_log=True,\n n_signal=3,\n n_resid=-1,\n threshold=10,\n max_iter=20,\n accumulate=False,\n increase_order=True,\n decrement_threshold=0,\n min_threshold=5,\n return_models=False,\n inplace=True,\n watershed: [None, int, Tuple[int, float], np.ndarray] = None,\n):\n \"\"\"\n Flag RFI by subtracting a smooth polynomial and iteratively removing outliers.\n\n On each iteration, a polynomial is fit to the unflagged data, and a lower-order\n polynomial is fit to the absolute residuals of the data with the model polynomial.\n Bins with absolute residuals greater than `n_abs_resid_threshold` are flagged,\n and the process is repeated until no new flags are found.\n\n Parameters\n ----------\n spectrum : array-like\n A 1D or 2D array, where the last axis corresponds to frequency. The data\n measured at those frequencies.\n flags : array-like, optional\n The flags associated with the data (same shape as `spectrum`).\n f_ratio : float, optional\n The ratio of the max to min frequency to be fit. Only required if ``f_log``\n is True.\n f_log : bool, optional\n Whether to fit the signal with log-spaced frequency values.\n t_log : bool, optional\n Whether to fit the signal with log temperature.\n n_signal : int, optional\n The number of polynomial terms to use to fit the signal.\n n_resid : int, optional\n The number of polynomial terms to use to fit the residuals.\n threshold : float, optional\n The factor by which the absolute residual model is multiplied to determine\n outliers.\n max_iter : int, optional\n The maximum number of iterations to perform.\n accumulate : bool, optional\n Whether to accumulate flags on each iteration.\n increase_order : bool, optional\n Whether to increase the order of the polynomial on each iteration.\n decrement_threshold : float, optional\n An amount to decrement the threshold by every iteration. Threshold will never\n go below ``min_threshold``.\n min_threshold : float, optional\n The minimum threshold to decrement to.\n return_models : bool, optional\n Whether to return the full models at each iteration.\n inplace : bool, optional\n Whether to fill up given flags array with the updated flags.\n watershed : int, tuple or ndarray, optional\n Specify a scheme for identifying channels surrounding a flagged channel as RFI.\n If an int, that many channels on each side of the flagged channel will be flagged.\n If a tuple, should be (int, float), where the int specifies the number of channels\n on each side, and the float specifies a threshold *with respect to* the overall\n threshold for flagging (so this should be less than one). If an array, the values\n represent this threshold where the central bin of the array is placed on the\n flagged channel.\n\n Returns\n -------\n flags : array-like\n Boolean array of the same shape as ``spectrum`` indicated which channels/times\n have flagged RFI.\n \"\"\"\n if decrement_threshold > 0 and min_threshold > threshold:\n warnings.warn(\n f\"You've set a threshold smaller than the min_threshold of {min_threshold}. 
Will use threshold={min_threshold}.\"\n )\n threshold = min_threshold\n\n if f_log and not f_ratio:\n raise ValueError(\"If fitting in log(freq), you must provide f_ratio.\")\n\n assert threshold > 1.5\n\n nf = spectrum.shape[-1]\n f = np.linspace(-1, 1, nf) if not f_log else np.logspace(0, f_ratio, nf)\n\n orig_flags = flags if flags is not None else np.zeros(nf, dtype=bool)\n orig_flags |= (spectrum <= 0) | np.isnan(spectrum) | np.isinf(spectrum)\n\n flags = orig_flags.copy()\n\n if not increase_order:\n assert n_resid < n_signal\n\n # Set the watershed as a small array that will overlay a flag.\n if isinstance(watershed, int):\n # By default, just kill all surrounding channels\n watershed = np.zeros(watershed * 2 + 1)\n watershed[len(watershed) // 2] = 1\n elif watershed is not None and len(watershed) == 2:\n # Otherwise, can provide weights per-channel.\n watershed = np.ones(watershed[0] * 2 + 1) * watershed[1]\n\n n_flags_changed = 1\n counter = 0\n\n n_flags_changed_list = []\n total_flags_list = []\n model_list = []\n model_std_list = []\n while n_flags_changed > 0 and counter < max_iter and np.sum(~flags) > n_signal * 2:\n ff = f[~flags]\n s = spectrum[~flags]\n\n if t_log:\n s = np.log(s)\n\n par = np.polyfit(ff, s, n_signal)\n model = np.polyval(par, f)\n if return_models:\n model_list.append(par)\n if t_log:\n model = np.exp(model)\n\n res = spectrum - model\n\n par = np.polyfit(\n ff, np.abs(res[~flags]), n_resid if n_resid > 0 else n_signal + n_resid\n )\n model_std = np.polyval(par, f)\n if return_models:\n model_std_list.append(par)\n\n if accumulate:\n nflags = np.sum(flags[~flags])\n flags[~flags] |= np.abs(res)[~flags] > threshold * model_std[~flags]\n n_flags_changed = np.sum(flags[~flags]) - nflags\n else:\n new_flags = orig_flags | (np.abs(res) > threshold * model_std)\n\n # Apply a watershed -- assume surrounding channels will succumb to RFI.\n if watershed is not None:\n watershed_flags = np.zeros_like(new_flags)\n # Go through each flagged channel\n for channel in np.where(new_flags)[0]:\n rng = range(\n max(0, channel - len(watershed) // 2),\n min(len(new_flags), channel + len(watershed) // 2 + 1),\n )\n wrng_min = max(0, -(channel - len(watershed) // 2))\n wrng = range(wrng_min, wrng_min + len(rng))\n\n watershed_flags[rng] |= (\n np.abs(res[rng]) > watershed[wrng] * threshold * model_std[rng]\n )\n new_flags |= watershed_flags\n\n n_flags_changed = np.sum(flags ^ new_flags)\n flags = new_flags.copy()\n\n counter += 1\n if increase_order:\n n_signal += 1\n\n threshold = max(threshold - decrement_threshold, min_threshold)\n\n n_flags_changed_list.append(n_flags_changed)\n total_flags_list.append(np.sum(flags))\n\n if counter == max_iter:\n warnings.warn(\n f\"max iterations ({max_iter}) reached, not all RFI might have been caught.\"\n )\n\n if np.sum(~flags) <= n_signal * 2:\n warnings.warn(\n \"Termination of iterative loop due to too many flags. Reduce n_signal or check data.\"\n )\n\n if inplace:\n orig_flags |= flags\n\n return (\n flags,\n {\n \"n_flags_changed\": n_flags_changed_list,\n \"total_flags\": total_flags_list,\n \"models\": model_list,\n \"model_std\": model_std_list,\n \"n_iters\": counter,\n },\n )\n\n\ndef xrfi_watershed(flags: np.ndarray, tol: [float, Tuple[float]] = 0.2, inplace=False):\n \"\"\"Applies a watershed over frequencies and times for flags, making sure\n that times/freqs with many flags are all flagged.\n\n Parameters\n ----------\n flags : ndarray of bool\n The existing flags.\n tol : float or tuple\n The tolerance -- i.e. 
the fraction of entries that must be flagged before\n flagging the whole axis. If a tuple, the first element is for the frequency\n axis, and the second for the time axis.\n inplace : bool, optional\n Whether to update the flags in-place.\n\n Returns\n -------\n ndarray :\n Boolean array of flags.\n \"\"\"\n fl = flags if inplace else flags.copy()\n\n if not hasattr(tol, \"__len__\"):\n tol = (tol, tol)\n\n freq_coll = np.sum(flags, axis=1)\n freq_mask = freq_coll > tol[0] * flags.shape[1]\n fl[freq_mask] = True\n\n time_coll = np.sum(fl, axis=0)\n time_mask = time_coll > tol[1] * flags.shape[0]\n fl[:, time_mask] = True\n return fl\n", "sub_path": "src/edges_cal/xrfi.py", "file_name": "xrfi.py", "file_ext": "py", "file_size_in_byte": 32231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.Tuple", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.finfo", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.true_divide", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 90, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 93, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 168, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.generic_filter", "line_number": 169, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 169, "usage_type": "name"}, {"api_name": "scipy.ndimage", "line_number": 177, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 186, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 253, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 254, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 299, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 301, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 410, "usage_type": "call"}, {"api_name": "modelling.ModelFit", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 440, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 456, 
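A usage sketch for `xrfi_watershed` above; the random starting mask and the pre-flagged row are invented to show the per-axis fractional threshold in action.

```python
import numpy as np

rng = np.random.default_rng(2)
flags = rng.random((100, 512)) < 0.05  # sparse starting flags
flags[17, :300] = True                 # one row already well over 20% flagged

out = xrfi_watershed(flags, tol=0.2)
print(bool(out[17].all()))  # True: the whole row is flagged once tol is exceeded
```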
"usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 458, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 515, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 515, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 581, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 592, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 599, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 616, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 616, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 616, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 625, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 636, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 639, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 644, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 653, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 674, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 674, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 736, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 747, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 747, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 749, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 750, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 750, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 760, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 764, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 773, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 778, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 780, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 781, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 785, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 789, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 790, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 792, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 797, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 798, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 799, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 801, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 805, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 807, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 816, "usage_type": "call"}, {"api_name": 
"numpy.sum", "line_number": 820, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 830, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 833, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 837, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 857, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 857, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 882, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 886, "usage_type": "call"}]} +{"seq_id": "330567540", "text": "from config import Config\nfrom query import Query\nfrom parser import Parser\nfrom common_utils import *\n\nclass CategoryVendorDetails(Parser):\n\n def merge_category_ids(self, productid_list):\n product_dict = dict()\n for product in productid_list:\n entity_id = product.get('entity_id')\n if entity_id in product_dict:\n categoryid_list = product_dict.get(entity_id).get('category_id')\n categoryname_list = product_dict.get(entity_id).get('category_name')\n if str(product.get('category_id')) not in categoryid_list:\n categoryid_list.append(str(product.get('category_id')))\n categoryname_list.append(str(product.get('category_name')))\n product_dict.get(entity_id).update({'category_id': categoryid_list})\n product_dict.get(entity_id).update({'category_name': categoryname_list})\n else:\n product['category_id'] = [str(product['category_id'])]\n product['category_name'] = [str(product['category_name'])]\n product_dict.update({entity_id: product})\n return product_dict.values()\n \n def parse_data(self, product_detail_list):\n product_detail_list = self.merge_category_ids(product_detail_list)\n to_string_list = ['entity_id', 'is_international_enabled', 'shipping_cost',\n 'is_in_stock', 'qty', 'vendor_id', 'international_shipping_cost',\n 'variant_id', 'custom_vars_combined', 'updated_at']\n for product in product_detail_list:\n product.update({'qty': int(product.get('qty'))})\n product.update(convert_to_string(to_string_list, product))\n self.add_vendor_info(product)\n self.add_vendor_country(product)\n self.add_dispatch_time_info(product)\n return product_detail_list\n\n \n def get_details(self, query, productid_list):\n '''A.entity_id, A.name, A.is_in_stock, A.url_path, A.shipping_cost, A.international_shipping_cost,\n 'A.variant_id, A.winning_variant, A.category_id, A.quantity, A.vendor_sku, A.updated_at,\n 'B.vendor_id, B.vendor_name, C.vendor_owner, C.vendor_city, C.vendor_pincode, C.vendor_name,\n 'C.vendor_logo, C.vendor_cod, C.allow_international_shipping, C.vendor_url, D.seller_ratings '''\n query = query % ', '.join(productid_list)\n result = query_db(query, Config.READ_DATABASE, Config.PSQL_READ)\n return self.parse_data(result)\n \n \nclass OmnitureDetails(CategoryVendorDetails):\n \n def parse_data(self, product_detail_list):\n to_string_list = ['enabled', 'wishlisted', 'entity_id', 'relevance_score',\n 'sales_3days', 'sales_total', 'variant_order', 'variant_id']\n for product in product_detail_list:\n self.select_winning_variant(product)\n self.add_images_field(product)\n product = convert_to_string(to_string_list, product)\n product = self.add_attributes_field(product)\n self.add_chart_list(product)\n return product_detail_list\n \n \n def get_details(self, query, productid_list):\n '''A.relevance_score, A.wishlisted, A.sales_total, A.sales_3days,\n B.attributes, B.variant_order, B.product_id as entity_id '''\n query = query % 
(', '.join(productid_list), ', '.join(productid_list))\n result = query_db(query, Config.READ_DATABASE, Config.PSQL_READ)\n return self.parse_data(result)\n \n \nclass PriceDetails(CategoryVendorDetails):\n \n '''def map_attributes(self, product):\n attribute_map = { '60': 'price', '61': 'discounted_price', \n '57': 'long_description', '58': 'short_description',\n '62': 'discount_start_date', '63': 'discount_end_date',\n '535': 'shipping_cost', '655': 'international_shipping_cost',\n '56': 'name', '82': 'url_path'\n }\n product.update({attribute_map.get(product.get('a1')): str(product.get('v1'))})\n product.update({attribute_map.get(product.get('a2')): str(product.get('v2'))})\n product.update({attribute_map.get(product.get('a3')): str(product.get('v3'))})\n product.update({attribute_map.get(product.get('a4')): str(product.get('v4'))})\n product.pop('a1')\n product.pop('v1')\n product.pop('a2')\n product.pop('v2')\n product.pop('a3')\n product.pop('v3')\n product.pop('a4')\n product.pop('v4')'''\n \n def merge_attributes(self, product_list):\n product_merged_attributes = dict()\n for product in product_list:\n product_detail = product_merged_attributes.get(product.get('entity_id', {}), {})\n product_detail.update(product)\n product_merged_attributes.update({product.get('entity_id'): product_detail})\n return product_merged_attributes.values()\n \n \n def parse_data(self, product_detail_list):\n to_string_list = ['price', 'discounted_price', 'long_description', 'short_description', \n 'discount_start_date', 'discount_end_date', 'shipping_cost', \n 'international_shipping_cost', 'name', 'url_path', 'entity_id']\n for product in product_detail_list:\n product.update(convert_to_string(to_string_list, product))\n merged_attributes = self.merge_attributes(product_detail_list)\n map(lambda product: self.add_name_fields(product), merged_attributes)\n map(lambda product: product.update({'color': self.get_color(product)}), merged_attributes)\n map(lambda product: self.get_discount_percentage(product), merged_attributes)\n return merged_attributes\n\n def get_details(self, query, productid_list):\n ''' A.entity_id, A.value as v1, A.attribute_id as a1, \n B.attribute_id as a2, B.value as v2'''\n return super(PriceDetails, self).get_details(query, productid_list)\n \n", "sub_path": "category_vendor_details.py", "file_name": "category_vendor_details.py", "file_ext": "py", "file_size_in_byte": 6148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "parser.Parser", "line_number": 6, "usage_type": "name"}, {"api_name": "config.Config.READ_DATABASE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 46, "usage_type": "name"}, {"api_name": "config.Config.PSQL_READ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "config.Config.READ_DATABASE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 68, "usage_type": "name"}, {"api_name": "config.Config.PSQL_READ", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "48336417", "text": "import contextlib\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport sys\nimport unittest\nimport warnings\n\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, List, Optional, Sequence\n\nfrom functorch.compile import min_cut_rematerialization_partition\n\nimport torch._functorch.config as functorch_config\n\nimport torch.fx\nimport torch.utils._pytree as 
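The merging done by `merge_attributes` above (and by `merge_category_ids` earlier) reduces to dict-keyed accumulation over rows sharing an `entity_id`. A standalone toy illustration with invented data:

```python
rows = [
    {"entity_id": "1", "price": "499"},
    {"entity_id": "1", "discounted_price": "399"},
    {"entity_id": "2", "price": "999"},
]

merged = {}
for row in rows:
    merged.setdefault(row["entity_id"], {}).update(row)

print(list(merged.values()))
# [{'entity_id': '1', 'price': '499', 'discounted_price': '399'},
#  {'entity_id': '2', 'price': '999'}]
```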
pytree\nfrom torch._dynamo import (\n compiled_autograd,\n logging as dynamo_logging,\n utils as dynamo_utils,\n)\nfrom torch._dynamo.utils import detect_fake_mode\nfrom torch._functorch.aot_autograd import make_boxed_func\nfrom torch._inductor.codecache import code_hash, CompiledFxGraph\nfrom torch._ops import OpOverload\nfrom torch._subclasses.fake_tensor import FakeTensor\nfrom torch.fx.passes.fake_tensor_prop import FakeTensorProp\n\nfrom .._dynamo.backends.common import aot_autograd\nfrom ..fx.graph import _PyTreeCodeGen\nfrom . import config, metrics\nfrom .debug import DebugContext\nfrom .decomposition import select_decomp_table\nfrom .fx_passes.joint_graph import joint_graph_passes\nfrom .fx_passes.post_grad import post_grad_passes, view_to_reshape\nfrom .fx_passes.pre_grad import pre_grad_passes\nfrom .graph import GraphLowering\nfrom .pattern_matcher import clone_graph\nfrom .utils import get_dtype_size, has_incompatible_cudagraph_ops\nfrom .virtualized import V\n\nif config.is_fbcode():\n from torch._inductor.fb.utils import time_and_log\nelse:\n # no-op decorator\n def time_and_log(attr: str):\n def wrap(old_func):\n @wraps(old_func)\n def newFunction(*args, **kwargs):\n return old_func(*args, **kwargs)\n\n return newFunction\n\n return wrap\n\n\nlog = logging.getLogger(__name__)\nperf_hint_log = torch._logging.getArtifactLogger(__name__, \"perf_hints\")\nALIGNMENT = 16\n\n\n@dataclasses.dataclass\nclass BoxedBool:\n value: bool\n\n def __bool__(self):\n return self.value\n\n @staticmethod\n def disable(obj):\n if isinstance(obj, BoxedBool):\n obj.value = False\n return obj\n return False\n\n\n@dataclasses.dataclass\nclass BoxedDeviceIndex:\n value: Optional[int]\n\n def set(self, device_idx):\n assert device_idx is None or isinstance(device_idx, int)\n self.value = device_idx\n\n\n# copy_ fails when trying to write to tensors with memory overlap,\n# for expanded dimensions (a dimension which used to have size 1 -> ?)\n# we can select one element from that dimension and write to it\n# to achieve writing to all values of that dimension of the input tensor\ndef get_expanded_dims(t):\n if not isinstance(t, torch.Tensor):\n return None\n return [i for i in range(t.ndim) if t.stride(i) == 0 and t.size(i) != 1]\n\n\ndef index_expanded_dims(t, expanded_dims):\n for expanded_dim in expanded_dims:\n t = torch.ops.aten.slice(t, expanded_dim, 0, 1)\n return t\n\n\ndef complex_memory_overlap(t):\n # if torch._debug_has_internal_overlap thinks this tensor potentially has\n # memory overlap internally, let's dig deeper to find out whether it's true.\n t = index_expanded_dims(t, get_expanded_dims(t))\n if torch._debug_has_internal_overlap(t) != 0:\n strides = t.stride()\n sizes = t.shape\n indices = list(range(len(strides)))\n indices = [x for _, x in sorted(zip(strides, indices))]\n for i in range(len(strides)):\n prev_stride = 1 if i == 0 else strides[indices[i - 1]]\n prev_size = 1 if i == 0 else sizes[indices[i - 1]]\n if strides[indices[i]] < prev_stride * prev_size:\n return True\n return False\n\n\n@functools.lru_cache(None)\ndef _step_logger():\n return dynamo_logging.get_step_logger(log)\n\n\n@functools.lru_cache(None)\ndef _warn_tf32_disabled():\n if (\n torch.cuda.is_available()\n and not torch.backends.cuda.matmul.allow_tf32\n and torch.cuda.get_device_capability() >= (8, 0)\n ):\n warnings.warn(\n \"TensorFloat32 tensor cores for float32 matrix multiplication available but not enabled. 
\"\n \"Consider setting `torch.set_float32_matmul_precision('high')` for better performance.\"\n )\n\n\ndef is_tf32_warning_applicable(gm: torch.fx.GraphModule):\n aten = torch.ops.aten\n tf32_ops = {\n aten.mm.default,\n aten.addmm.default,\n aten.bmm.default,\n aten.baddbmm.default,\n }\n for node in gm.graph.nodes:\n if (\n node.op == \"call_function\"\n and node.target in tf32_ops\n and isinstance(node.meta.get(\"val\", None), torch.Tensor)\n and node.meta[\"val\"].dtype == torch.float32\n and node.meta[\"val\"].device.type == \"cuda\"\n ):\n return True\n return False\n\n\n@DebugContext.wrap\ndef count_bytes_inner(gm, example_inputs, num_fixed=0, **kwargs):\n shape_env = _shape_env_from_inputs(example_inputs)\n\n graph = GraphLowering(gm, shape_env=shape_env, num_static_inputs=num_fixed)\n with V.set_graph_handler(graph):\n graph.run(*example_inputs)\n num_bytes, nodes_num_elem = graph.count_bytes()\n metrics.num_bytes_accessed += num_bytes\n metrics.nodes_num_elem += nodes_num_elem\n return make_boxed_func(gm.forward)\n\n\ndef inner_compile_with_cpp_wrapper(inner_compile):\n @functools.wraps(inner_compile)\n def wrapper(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor], **kwargs):\n \"\"\"\n Compile into cpp wrapper:\n For CPU, this is currently done in one pass.\n For GPU, this is done in two passes: JIT-compile the model with python wrapper code\n and run it to generate autotuned kernel binaries in the first pass; and then generate\n cpp wrapper code and compile it to a dynamic library in the second pass.\n \"\"\"\n devices = (\n {t.device.type for t in gm.parameters()}\n | {t.device.type for t in gm.buffers()}\n | {t.device.type for t in example_inputs if isinstance(t, torch.Tensor)}\n )\n\n if \"cuda\" not in devices:\n kwargs_patched = {**kwargs, \"cpp_wrapper\": True}\n return inner_compile(gm, example_inputs, **kwargs_patched)\n else:\n with config.patch(\n {\n \"triton.store_cubin\": True,\n }\n ):\n # first pass with regular python wrapper code\n kwargs_patched = {\n **kwargs,\n \"cpp_wrapper\": False,\n }\n # clone_graph(gm) makes sure no graph modification from the first pass will\n # leak to the second pass. 
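For reference, the remedy that the TF32 warning above recommends looks like this in user code (both switches are documented PyTorch APIs, not part of this file):

```python
import torch

# Recommended one-liner:
torch.set_float32_matmul_precision("high")

# Equivalent lower-level switch, which is what the warning actually checks:
torch.backends.cuda.matmul.allow_tf32 = True
```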
It does increase memory pressure, but the problem\n # can be alleviated once we have parameters as FakeTensor.\n\n compiled = inner_compile(\n clone_graph(gm), example_inputs, **kwargs_patched\n )\n if torch._guards.TracingContext.get().output_strides:\n torch._guards.TracingContext.get().output_strides.clear()\n\n def materialize(x):\n if isinstance(x, (torch.SymInt, torch.SymFloat)):\n # Need concrete value to run dynamic shapes and tune the result\n return x.node.hint\n else:\n assert not isinstance(x, FakeTensor)\n return x\n\n assert torch._guards.TracingContext.get()\n real_inputs = [\n materialize(x)\n for x in [\n *[\n param\n for param in torch._guards.TracingContext.get().params_flat\n if param is not None\n ],\n *V.real_inputs,\n ]\n ]\n\n with torch.utils._python_dispatch._disable_current_modes():\n compiled(real_inputs)\n\n real_inputs = None\n\n # second pass\n kwargs_patched = {**kwargs, \"cpp_wrapper\": True}\n return inner_compile(gm, example_inputs, **kwargs_patched)\n\n return wrapper\n\n\ndef fake_tensor_prop(\n gm: torch.fx.GraphModule,\n example_inputs: List[torch.Tensor],\n force_allow_non_fake_inputs=False,\n):\n \"\"\"\n If we can not detect fake mode from the context of inputs, create one.\n\n The created fake mode will be returned.\n \"\"\"\n fake_mode = detect_fake_mode(example_inputs)\n if not fake_mode:\n fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)\n FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)\n else:\n ctx = (\n contextlib.nullcontext()\n if not force_allow_non_fake_inputs\n else unittest.mock.patch.object(fake_mode, \"allow_non_fake_inputs\", True)\n )\n with ctx:\n FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(\n *example_inputs\n )\n\n return fake_mode\n\n\n@DebugContext.wrap\n@torch.utils._python_dispatch._disable_current_modes()\n@time_and_log(attr=\"compilation time (in seconds)\")\ndef compile_fx_inner(\n gm: torch.fx.GraphModule,\n example_inputs: List[torch.Tensor],\n cudagraphs: Optional[BoxedBool] = None,\n num_fixed=0,\n is_backward=False,\n graph_id=None,\n cpp_wrapper=False,\n aot_mode=False,\n is_inference=False,\n boxed_forward_device_index=None,\n user_visible_outputs=frozenset(),\n layout_opt=None,\n):\n if dynamo_utils.count_calls(gm.graph) == 0:\n return make_boxed_func(gm.forward)\n\n if cudagraphs is None:\n cudagraphs = BoxedBool(config.triton.cudagraphs)\n\n # Inputs to fx_codegen_and_compile\n graph_args = [gm, example_inputs]\n graph_kwargs = {\n \"cudagraphs\": cudagraphs,\n \"num_fixed\": num_fixed,\n \"is_backward\": is_backward,\n \"graph_id\": graph_id,\n \"cpp_wrapper\": cpp_wrapper,\n \"aot_mode\": aot_mode,\n \"is_inference\": is_inference,\n \"user_visible_outputs\": user_visible_outputs,\n \"layout_opt\": layout_opt,\n }\n\n compiled_graph: CompiledFxGraph = fx_codegen_and_compile(\n *graph_args, **graph_kwargs\n )\n\n if aot_mode:\n return compiled_graph\n\n if cudagraphs:\n # output args are tuple of first argument\n output = list(gm.graph.nodes)[-1]\n assert len(output.args) == 1\n stack_traces = [\n (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)\n for arg in output.args[0]\n ]\n\n complex_memory_overlap_inputs = any(\n complex_memory_overlap(t)\n for t in example_inputs\n if isinstance(t, torch.Tensor)\n )\n\n # doesnt work for non-trees because the warmup run would apply mutation twice\n if config.triton.cudagraph_trees:\n # checking if mutation is only on paramameters/static inputs\n has_mutation = not all(\n idx < num_fixed for idx 
in compiled_graph.mutated_input_idxs\n )\n else:\n has_mutation = len(compiled_graph.mutated_inputs) != 0\n\n cudagraph_tests = [\n (set(compiled_graph.device_types) == {\"cuda\"}, \"non-cuda device in graph\"),\n (not has_mutation, \"mutated inputs\"),\n (not has_incompatible_cudagraph_ops(gm), \"incompatible ops\"),\n (not complex_memory_overlap_inputs, \"complex memory overlap\"),\n (\n all(\n isinstance(t, (torch.Tensor, torch.SymInt)) for t in example_inputs\n ),\n \"non-Tensor inputs\",\n ),\n (\n (\n len(compiled_graph.device_idxs) == 1\n or not config.triton.cudagraph_trees\n ),\n \"multiple device indices without cudagraph_trees\",\n ),\n ]\n cudagraph_fail_reasons = [s for b, s in cudagraph_tests if not b]\n\n if not cudagraph_fail_reasons:\n if not config.triton.cudagraph_trees:\n # Force specialize all inputs so that CUDA graphs will work\n for t in example_inputs:\n if isinstance(t, torch.SymInt):\n int(t) # guard\n\n if (\n boxed_forward_device_index is not None\n and not is_inference\n and not is_backward\n ):\n boxed_forward_device_index.set(next(iter(compiled_graph.device_idxs)))\n\n compiled_graph.current_callable = cudagraphify(\n compiled_graph.get_current_callable(),\n example_inputs,\n static_input_idxs=range(num_fixed),\n device_index=next(iter(compiled_graph.device_idxs)),\n stack_traces=stack_traces,\n is_backward=is_backward,\n is_inference=is_inference,\n )\n else:\n BoxedBool.disable(cudagraphs)\n\n # See [Backward Generation Handling]\n # if cudagraph'd the forward and set the device, we need to let the cudagraph manager\n # know we are we running the backward even if we will not run it in cudagraphs\n if is_backward and config.triton.cudagraph_trees:\n assert boxed_forward_device_index.value is not None\n compiled_graph_callable = compiled_graph.get_current_callable()\n\n manager = torch._inductor.cudagraph_trees.get_manager(\n boxed_forward_device_index.value, create_if_none_exists=False\n )\n # should already exist from forward\n assert manager is not None\n\n def compiled_artifact(new_inputs):\n manager.set_to_running_backward()\n return compiled_graph_callable(new_inputs)\n\n compiled_graph.current_callable = compiled_artifact\n\n if len(set(compiled_graph.device_types)) > 1:\n perf_hint_log.warning(\"skipping cudagraphs due to multiple devices\")\n elif set(compiled_graph.device_types) == {\"cuda\"}:\n if has_mutation:\n perf_hint_log.warning(\"skipping cudagraphs due to input mutation\")\n elif complex_memory_overlap_inputs:\n perf_hint_log.warning(\n \"skipping cudagraphs due to complex input striding\"\n )\n elif (\n len(compiled_graph.device_idxs) > 1\n and config.triton.cudagraph_trees\n ):\n perf_hint_log.warning(\n \"skipping cudagraphs due to multiple device indexes\"\n )\n else:\n perf_hint_log.warning(\"skipping cudagraphs for unknown reason\")\n else:\n perf_hint_log.warning(\"skipping cudagraphs for unknown reason\")\n\n # cudagraphs does its own aligning of inputs\n if not cudagraphs:\n new_callable = align_inputs(\n compiled_graph.get_current_callable(), example_inputs, range(num_fixed)\n )\n if new_callable is not compiled_graph.get_current_callable():\n compiled_graph.current_callable = new_callable\n\n _step_logger()(\n logging.INFO,\n \"torchinductor done compiling \"\n f\"{'BACKWARDS' if is_backward else 'FORWARDS'} \"\n f\"graph {graph_id}\",\n )\n\n # aot autograd needs to know to pass in inputs as a list\n compiled_graph._boxed_call = True\n return compiled_graph\n\n\ndef fx_codegen_and_compile(\n gm: torch.fx.GraphModule,\n 
example_inputs: List[torch.Tensor],\n cudagraphs: Optional[BoxedBool] = None,\n num_fixed=0,\n is_backward=False,\n graph_id=None,\n cpp_wrapper=False,\n aot_mode=False,\n is_inference=False,\n user_visible_outputs=frozenset(),\n layout_opt=None,\n) -> CompiledFxGraph:\n if is_tf32_warning_applicable(gm):\n _warn_tf32_disabled()\n\n # lift the maximum depth of the Python interpreter stack\n # to adapt large/deep models\n sys.setrecursionlimit(max(sys.getrecursionlimit(), 2000))\n\n _step_logger()(\n logging.INFO,\n \"torchinductor compiling \"\n f\"{'BACKWARDS' if is_backward else 'FORWARDS'} \"\n f\"graph {graph_id}\",\n )\n V.debug.fx_graph(gm, example_inputs)\n\n shape_env = _shape_env_from_inputs(example_inputs)\n\n # Convert view to reshape in the graph. This is necessary primarily for\n # layout optimization. Do it unconditionally for uniformity.\n #\n # It's needed because when we do layout optimization, an contiguous tensor\n # in eager mode may becomes a channels last tensor. A view op previously\n # can be applied to the contiguous tensor may not be able to be applied\n # on the channels tensor any more. An error like\n # RuntimeError: view size is not compatible with input tensor's size and stride\n # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.\n # will be printed.\n #\n # Replace view op to reshape op in this case.\n # As an example, timm_resnest/botnet26t_256/convnext_base etc. will fail if we don't do this.\n #\n # Also this has to be done before FakeTensorProp below to avoid the failed\n # .view() call.\n view_to_reshape(gm)\n\n fake_mode = fake_tensor_prop(gm, example_inputs)\n\n # pattern matcher passes might not preserve striding information\n # on node.meta[\"val\"]. if in the future we rely on these being\n # correct we will need to fix.\n\n with V.set_fake_mode(fake_mode):\n # has some issues with memory in training\n post_grad_passes(gm, is_inference=is_inference)\n V.debug.fx_graph_transformed(gm, example_inputs)\n\n with V.set_fake_mode(fake_mode):\n graph = GraphLowering(\n gm,\n shape_env=shape_env,\n num_static_inputs=num_fixed,\n graph_id=graph_id,\n cpp_wrapper=cpp_wrapper,\n aot_mode=aot_mode,\n user_visible_outputs=user_visible_outputs,\n )\n with V.set_graph_handler(graph):\n graph.run(*example_inputs)\n context = torch._guards.TracingContext.get()\n if context is not None and context.output_strides is not None:\n # Return the output strides to the caller via TracingContext\n assert len(context.output_strides) == 0\n for out in graph.graph_outputs:\n if hasattr(out, \"layout\"):\n context.output_strides.append(\n tuple(\n V.graph.sizevars.size_hint(s) for s in out.layout.stride\n )\n )\n else:\n context.output_strides.append(None)\n compiled_fn = graph.compile_to_fn()\n\n if graph.disable_cudagraphs:\n BoxedBool.disable(cudagraphs)\n\n compiled_graph = CompiledFxGraph(\n compiled_artifact=compiled_fn,\n cache_key=graph.cache_key,\n artifact_path=graph.cache_path,\n cache_linemap=graph.cache_linemap,\n device_types=graph.device_types,\n device_idxs=graph.device_idxs,\n mutated_inputs=graph.mutated_inputs,\n mutated_input_idxs=graph.mutated_input_idxs,\n )\n return compiled_graph\n\n\ndef clone_preserve_strides(x):\n needed_size = (\n sum((shape - 1) * stride for shape, stride in zip(x.size(), x.stride())) + 1\n )\n buffer = torch.as_strided(x, (needed_size,), (1,)).clone()\n return torch.as_strided(buffer, x.size(), x.stride())\n\n\ndef copy_misaligned_inputs(new_inputs, check_inputs_idxs: Sequence[int]) -> 
None:\n for i in check_inputs_idxs:\n if new_inputs[i].data_ptr() % ALIGNMENT:\n new_inputs[i] = clone_preserve_strides(new_inputs[i])\n\n\ndef get_input_idxs_to_check(inputs, static_input_idxs) -> Sequence[int]:\n def is_aligned(storage_offset, dtype):\n return (storage_offset * get_dtype_size(dtype)) % ALIGNMENT == 0\n\n return [\n i\n for i in range(len(inputs))\n if isinstance(inputs[i], torch.Tensor)\n and (\n i not in static_input_idxs\n or not is_aligned(inputs[i].storage_offset(), inputs[i].dtype)\n )\n and inputs[i].device.type == \"cuda\"\n ]\n\n\ndef align_inputs_from_check_idxs(model, inputs_to_check: Sequence[int]):\n if len(inputs_to_check) == 0:\n return model\n\n def run(new_inputs):\n copy_misaligned_inputs(new_inputs, inputs_to_check)\n return model(new_inputs)\n\n return run\n\n\ndef align_inputs(model, inputs, static_input_idxs=()):\n inputs_to_check = get_input_idxs_to_check(inputs, static_input_idxs)\n return align_inputs_from_check_idxs(model, inputs_to_check)\n\n\n@dynamo_utils.dynamo_timed\ndef cudagraphify(\n model,\n inputs,\n static_input_idxs=(),\n *,\n device_index: int,\n stack_traces: List[Optional[str]],\n is_backward: bool,\n is_inference: bool,\n):\n from torch._inductor.cudagraph_trees import (\n cudagraphify_impl as new_cudagraphify_impl,\n )\n\n if config.triton.cudagraph_trees:\n cudagraphify_fn = functools.partial(\n new_cudagraphify_impl,\n device_index=device_index,\n stack_traces=stack_traces,\n is_backward=is_backward,\n is_inference=is_inference,\n )\n else:\n cudagraphify_fn = cudagraphify_impl\n\n # if using fake tensors, defer cudagraphs until we get real inputs at runtime\n if not any(isinstance(inp, FakeTensor) for inp in inputs):\n return cudagraphify_fn(model, inputs, static_input_idxs)\n\n compiled_fn = None\n\n def run(new_inputs):\n nonlocal compiled_fn\n if compiled_fn is None:\n with dynamo_utils.preserve_rng_state():\n compiled_fn = cudagraphify_fn(model, new_inputs, static_input_idxs)\n return compiled_fn(new_inputs)\n\n return run\n\n\ndef remove_unaligned_input_idxs(inputs, static_input_idxs):\n \"\"\"\n We require all inputs to be aligned, so introduce a copy for any\n that aren't.\n \"\"\"\n aligned_static_input_idxs = {\n idx\n for idx in static_input_idxs\n if isinstance(inputs[idx], torch.Tensor)\n and (inputs[idx].data_ptr() % ALIGNMENT) == 0\n }\n if len(aligned_static_input_idxs) != len(static_input_idxs):\n return aligned_static_input_idxs\n return static_input_idxs\n\n\ndef static_input(x):\n \"\"\"\n Copy and input while preserving strides\n \"\"\"\n # TODO(jansel): figure out why this version doesn't work:\n # return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device)\n needed_size = (\n sum((shape - 1) * stride for shape, stride in zip(x.size(), x.stride())) + 1\n )\n buffer = torch.empty(needed_size, dtype=x.dtype, device=x.device)\n return torch.as_strided(buffer, x.size(), x.stride())\n\n\ndef index_expanded_dims_and_copy_(dst, src, expanded_dims):\n \"Index into expanded dimensions of both dst and src then copy_\"\n dst = index_expanded_dims(dst, expanded_dims)\n src = index_expanded_dims(src, expanded_dims)\n dst.copy_(src)\n\n\ndef cudagraphify_impl(model, inputs, static_input_idxs=()):\n \"\"\"\n Assumes inputs[static_input_idxs[i]] are always the same memory address\n \"\"\"\n check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)\n static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)\n copy_misaligned_inputs(inputs, check_input_idxs)\n\n assert 
isinstance(inputs, (list, tuple))\n\n inps_expanded_dims = [\n get_expanded_dims(x) if idx not in static_input_idxs else []\n for idx, x in enumerate(inputs)\n ]\n\n # allocate static tensor inputs\n static_inputs = [\n x\n if not isinstance(x, torch.Tensor)\n else static_input(x)\n if idx not in static_input_idxs\n else x.detach()\n for idx, x in enumerate(inputs)\n ]\n\n # copy over input values for fresh allocations\n for idx, (x, expanded_dims) in enumerate(zip(inputs, inps_expanded_dims)):\n if isinstance(x, torch.Tensor) and idx not in static_input_idxs:\n index_expanded_dims_and_copy_(static_inputs[idx], x, expanded_dims)\n\n # warmup\n torch.cuda.synchronize()\n stream = torch.cuda.Stream()\n stream.wait_stream(torch.cuda.current_stream())\n # copy static_inputs because it will be cleared in model\n with torch.cuda.stream(stream):\n model(list(static_inputs))\n stream.synchronize()\n torch.cuda.current_stream().wait_stream(stream)\n torch.cuda.synchronize()\n\n # record\n graph = torch.cuda.CUDAGraph()\n with torch.cuda.graph(graph, stream=stream):\n static_outputs = model(list(static_inputs))\n if not isinstance(static_outputs, (list, tuple)):\n static_outputs = (static_outputs,)\n\n if config.size_asserts:\n\n def run(new_inputs):\n assert len(static_inputs) == len(new_inputs)\n for idx, (dst, src, expanded_dims) in enumerate(\n zip(static_inputs, new_inputs, inps_expanded_dims)\n ):\n if not isinstance(dst, torch.Tensor):\n pass\n elif idx in static_input_idxs:\n assert dst.data_ptr() == src.data_ptr()\n else:\n # TODO - could make one single op of multiple slices\n # and avoid dispatch.\n # Could also pre-index the `dst` tensors\n index_expanded_dims_and_copy_(dst, src, expanded_dims)\n new_inputs.clear()\n graph.replay()\n return static_outputs\n\n else:\n copy_indices = [\n idx for idx in range(len(static_inputs)) if idx not in static_input_idxs\n ]\n\n def run(new_inputs):\n for idx in copy_indices:\n expanded_dims = inps_expanded_dims[idx]\n index_expanded_dims_and_copy_(\n static_inputs[idx], new_inputs[idx], expanded_dims\n )\n new_inputs.clear()\n graph.replay()\n return static_outputs\n\n return align_inputs_from_check_idxs(run, check_input_idxs)\n\n\ndef count_tangents(fx_g: torch.fx.GraphModule):\n \"\"\"\n Infers which inputs are static for a backwards graph\n \"\"\"\n\n def is_saved_tensor(x):\n return (\n \"tangents\" not in x.name\n and \"bwd_seed\" not in x.name\n and \"bwd_base_offset\" not in x.name\n )\n\n arg_count = 0\n static_arg_idxs = []\n for n in fx_g.graph.nodes:\n if n.op == \"placeholder\":\n if is_saved_tensor(n):\n static_arg_idxs.append(arg_count)\n arg_count += 1\n\n assert static_arg_idxs == list(range(len(static_arg_idxs)))\n return len(static_arg_idxs)\n\n\n_in_aot_compilation = BoxedBool(False)\n\n\ndef compile_fx_aot(\n model_: torch.fx.GraphModule,\n example_inputs_: List[torch.Tensor],\n inner_compile=compile_fx_inner,\n config_patches: Optional[Dict[str, Any]] = None,\n):\n config_patches = (\n {\"cpp_wrapper\": True}\n if config_patches is None\n else {**config_patches, \"cpp_wrapper\": True}\n )\n if (\n \"aot_inductor_output_path\" not in config_patches\n and not config.aot_inductor_output_path\n ):\n config_patches = {\n **config_patches,\n \"aot_inductor_output_path\": code_hash(model_.code),\n }\n\n with unittest.mock.patch.object(_in_aot_compilation, \"value\", True):\n return compile_fx(\n model_,\n example_inputs_,\n inner_compile=functools.partial(inner_compile, aot_mode=True),\n config_patches=config_patches,\n 
)\n\n\n_graph_counter = itertools.count(0)\n\n\ndef fw_compiler_freezing(\n aot_autograd_model: torch.fx.GraphModule,\n aot_example_inputs,\n dynamo_model,\n num_example_inputs,\n inner_compile,\n cudagraphs,\n graph_id,\n forward_device,\n):\n from torch._inductor.freezing import convert_conv_weights_to_channels_last, freeze\n\n # partition_fn won't be called\n joint_graph_passes(aot_autograd_model)\n\n layout_opt = GraphLowering.decide_layout_opt(aot_autograd_model)\n if layout_opt:\n # make sure meta['val'] is properly setup\n fake_tensor_prop(aot_autograd_model, aot_example_inputs, True)\n convert_conv_weights_to_channels_last(aot_autograd_model)\n\n opt_model, preserved_arg_indices = freeze(\n dynamo_model,\n aot_autograd_model,\n aot_example_inputs,\n )\n\n aot_example_inputs = [aot_example_inputs[ind] for ind in preserved_arg_indices]\n num_fixed = len(preserved_arg_indices) - num_example_inputs\n\n fake_mode = detect_fake_mode(aot_example_inputs)\n\n # for freezing, all graph outputs should be user visible\n *_, model_outputs_node = opt_model.graph.nodes\n model_outputs = model_outputs_node.args[0]\n user_visible_outputs = [\n n.name for n in model_outputs if isinstance(n, torch.fx.Node)\n ]\n\n # constant params will be real tensors, not fake\n params_flat = torch._guards.TracingContext.get().params_flat\n for i in range(len(params_flat)):\n if i not in preserved_arg_indices:\n params_flat[i] = None\n\n with unittest.mock.patch.object(fake_mode, \"allow_non_fake_inputs\", True):\n optimized_function = inner_compile(\n opt_model,\n aot_example_inputs,\n num_fixed=num_fixed,\n cudagraphs=cudagraphs,\n graph_id=graph_id,\n is_inference=True,\n boxed_forward_device_index=forward_device,\n layout_opt=layout_opt,\n user_visible_outputs=user_visible_outputs,\n )\n\n # aot_inductor codegens a call that takes in just the inputs, so we don't return a wrapper\n # that drops constant-ified params\n if _in_aot_compilation:\n return optimized_function\n\n def wrapper(args):\n args_new = [args[i] for i in preserved_arg_indices]\n args.clear()\n return optimized_function(args_new)\n\n wrapper._boxed_call = True\n\n return wrapper\n\n\ndef compile_fx(\n model_: torch.fx.GraphModule,\n example_inputs_: List[torch.Tensor],\n inner_compile=compile_fx_inner,\n config_patches: Optional[Dict[str, Any]] = None,\n decompositions: Optional[Dict[OpOverload, Callable]] = None,\n):\n \"\"\"Main entrypoint to a compile given FX graph\"\"\"\n if config_patches:\n with config.patch(config_patches):\n return compile_fx(\n model_,\n example_inputs_,\n # need extra layer of patching as backwards is compiled out of scope\n inner_compile=config.patch(config_patches)(inner_compile),\n decompositions=decompositions,\n )\n\n if config.cpp_wrapper:\n with config.patch(\n {\n \"cpp_wrapper\": False,\n \"triton.autotune_cublasLt\": False,\n \"triton.cudagraphs\": False,\n # CudaWrapperCodeGen relies on kernel name to find the autotuned cubin file\n \"triton.unique_kernel_names\": True,\n }\n ), V.set_real_inputs(example_inputs_):\n return compile_fx(\n model_,\n example_inputs_,\n inner_compile=inner_compile_with_cpp_wrapper(inner_compile),\n decompositions=decompositions,\n )\n\n recursive_compile_fx = functools.partial(\n compile_fx,\n inner_compile=inner_compile,\n decompositions=decompositions,\n )\n\n if not graph_returns_tuple(model_):\n return make_graph_return_tuple(\n model_,\n example_inputs_,\n recursive_compile_fx,\n )\n\n if isinstance(model_, torch.fx.GraphModule):\n if isinstance(model_.graph._codegen, 
_PyTreeCodeGen):\n # this graph is the result of dynamo.export()\n return handle_dynamo_export_graph(\n model_,\n example_inputs_,\n recursive_compile_fx,\n )\n\n # Since handle_dynamo_export_graph will trigger compile_fx again,\n # Move these passes after handle_dynamo_export_graph to avoid repeated calls.\n model_ = pre_grad_passes(model_, example_inputs_)\n\n if any(isinstance(x, (list, tuple, dict)) for x in example_inputs_):\n return flatten_graph_inputs(\n model_,\n example_inputs_,\n recursive_compile_fx,\n )\n\n assert not config._raise_error_for_testing\n num_example_inputs = len(example_inputs_)\n cudagraphs = BoxedBool(config.triton.cudagraphs)\n forward_device = BoxedDeviceIndex(None)\n\n graph_id = next(_graph_counter)\n\n decompositions = (\n decompositions if decompositions is not None else select_decomp_table()\n )\n\n @dynamo_utils.dynamo_timed\n def fw_compiler_base(model: torch.fx.GraphModule, example_inputs, is_inference):\n if is_inference:\n # partition_fn won't be called\n joint_graph_passes(model)\n\n num_rng_seed_offset_inputs = 2 if functorch_config.functionalize_rng_ops else 0\n fixed = len(example_inputs) - num_example_inputs - num_rng_seed_offset_inputs\n user_visible_outputs = set()\n\n if config.keep_output_stride:\n *_, model_outputs_node = model.graph.nodes\n assert model_outputs_node.op == \"output\"\n model_outputs, _ = pytree.tree_flatten(model_outputs_node.args)\n num_model_outputs = len(model_outputs)\n\n if torch._guards.TracingContext.get():\n original_output_start_index = (\n torch._guards.TracingContext.get().fw_metadata.num_mutated_inputs\n )\n else:\n original_output_start_index = 0\n\n if isinstance(model_, torch.fx.GraphModule):\n *_, orig_model_outputs_node = model_.graph.nodes\n assert orig_model_outputs_node.op == \"output\"\n orig_model_outputs, _ = pytree.tree_flatten(\n orig_model_outputs_node.args\n )\n num_orig_model_outputs = len(orig_model_outputs)\n else:\n num_orig_model_outputs = num_model_outputs\n\n assert num_orig_model_outputs <= num_model_outputs\n\n # We makes the following assumption\n # For inference\n # len(orig_model_outputs) == len(model_outputs)\n # For training\n # len(orig_model_outputs) <= len(model_outputs)\n # During training, most of the time the model_outputs starts with\n # orignal module's outputs followed by saved activations.\n # But this can be not true if the model have inplace updated tensors.\n # AOTAutograd will make those tensors being returned before the orignal\n # module's output.\n # To make things safe, we'll use original_output_start_index field\n # set by AOTAutograd to decide where the original module outputs start.\n\n user_visible_outputs = {\n n.name\n for n in model_outputs[\n original_output_start_index : original_output_start_index\n + num_orig_model_outputs\n ]\n if isinstance(n, torch.fx.Node)\n }\n\n return inner_compile(\n model,\n example_inputs,\n num_fixed=fixed,\n cudagraphs=cudagraphs,\n graph_id=graph_id,\n is_inference=is_inference,\n boxed_forward_device_index=forward_device,\n user_visible_outputs=user_visible_outputs,\n )\n\n fw_compiler = functools.partial(fw_compiler_base, is_inference=False)\n\n if config.freezing and not torch.is_grad_enabled():\n inference_compiler = functools.partial(\n fw_compiler_freezing,\n dynamo_model=model_,\n num_example_inputs=num_example_inputs,\n inner_compile=inner_compile,\n cudagraphs=cudagraphs,\n graph_id=graph_id,\n forward_device=forward_device,\n )\n else:\n inference_compiler = functools.partial(fw_compiler_base, 
is_inference=True)\n\n def partition_fn(graph, joint_inputs, **kwargs):\n joint_graph_passes(graph)\n return min_cut_rematerialization_partition(\n graph, joint_inputs, **kwargs, compiler=\"inductor\"\n )\n\n @dynamo_utils.dynamo_timed\n def bw_compiler(model: torch.fx.GraphModule, example_inputs):\n fixed = count_tangents(model)\n return inner_compile(\n model,\n example_inputs,\n num_fixed=fixed,\n cudagraphs=cudagraphs,\n is_backward=True,\n graph_id=graph_id,\n boxed_forward_device_index=forward_device,\n )\n\n # TODO: can add logging before/after the call to create_aot_dispatcher_function\n # in torch._functorch/aot_autograd.py::aot_module_simplified::aot_function_simplified::new_func\n # once torchdynamo is merged into pytorch\n fake_mode = detect_fake_mode(example_inputs_) or torch._subclasses.FakeTensorMode(\n allow_non_fake_inputs=True\n )\n tracing_context = (\n torch._guards.TracingContext.get() or torch._guards.TracingContext(fake_mode)\n )\n with V.set_fake_mode(fake_mode), torch._guards.tracing(\n tracing_context\n ), compiled_autograd.disable():\n return aot_autograd(\n fw_compiler=fw_compiler,\n bw_compiler=bw_compiler,\n inference_compiler=inference_compiler,\n decompositions=decompositions,\n partition_fn=partition_fn,\n keep_inference_input_mutations=True,\n )(model_, example_inputs_)\n\n\n# pass config dict back to user\ndef get_patched_config_dict(config_patches=None):\n with config.patch(config_patches):\n return config.get_config_copy()\n\n\ndef _shape_env_from_inputs(inputs):\n shape_env = None\n fake_mode = detect_fake_mode(inputs)\n\n # TODO(voz): It would be nice to enable this assert, but there are lots of tests that\n # pass in real inputs for now.\n # if len(inputs) > 0:\n # assert fake_mode is not None, breakpoint()\n\n if fake_mode is not None:\n return fake_mode.shape_env\n\n # When there are no tensor inputs, get shape_env from the first SymInt.\n for input in inputs:\n if isinstance(input, torch.SymInt):\n return input.node.shape_env\n\n # TODO(voz): Should we always have one anyway?\n return None\n\n\ndef output_node(gm: torch.fx.GraphModule):\n \"\"\"Get the output node from an FX graph\"\"\"\n last_node = next(iter(reversed(gm.graph.nodes)))\n assert last_node.op == \"output\"\n return last_node\n\n\ndef graph_returns_tuple(gm: torch.fx.GraphModule):\n \"\"\"True if a FX graph returns a tuple\"\"\"\n if not isinstance(gm, torch.fx.GraphModule):\n return True # can't check this, assume true\n (rv,) = output_node(gm).args\n if isinstance(rv, (list, tuple)):\n return True\n if (\n isinstance(rv, torch.fx.node.Node)\n and hasattr(rv.target, \"_schema\")\n and len(rv.target._schema.returns) > 1\n and all(str(ret.type) == \"Tensor\" for ret in rv.target._schema.returns)\n ):\n # for graphs whose result is one node with multiple outputs\n return True\n return False\n\n\ndef make_graph_return_tuple(gm: torch.fx.GraphModule, inputs, compile_gm):\n \"\"\"\n Mutate gm so it returns a tuple. 
This is only needed for graphs\n not created by torchdynamo that return non-tuples.\n \"\"\"\n node = output_node(gm)\n (rv,) = node.args\n rv, spec = pytree.tree_flatten(rv)\n with gm.graph.inserting_before(node):\n gm.graph.output(rv)\n gm.graph.erase_node(node)\n assert graph_returns_tuple(gm)\n\n compiled_fn = compile_gm(gm, inputs)\n\n @functools.wraps(compiled_fn)\n def wrapper(*args, **kwargs):\n return pytree.tree_unflatten(compiled_fn(*args, **kwargs), spec)\n\n return wrapper\n\n\ndef flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):\n \"\"\"\n Mutate inputs so that they are flat and wrap gm such that it\n accepts those inputs. This is only needed for graphs not created\n by torchdynamo that take bumpy inputs.\n \"\"\"\n inputs, spec = pytree.tree_flatten(inputs)\n\n class GmWrapper(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.gm = gm\n\n def forward(self, *args):\n return self.gm(*pytree.tree_unflatten(args, spec))\n\n compiled_fn = compile_gm(GmWrapper(), inputs)\n\n @functools.wraps(compiled_fn)\n def wrapper(*args):\n # note this doesn't check the spec, assuming it is the same\n return compiled_fn(*pytree.tree_flatten(args)[0])\n\n return wrapper\n\n\ndef handle_dynamo_export_graph(gm, inputs, compile_gm):\n \"\"\"\n `torch._dynamo.export` embeds pytrees in the FX graph codegen object,\n convert that to a normal FX graph so inductor can compile it.\n \"\"\"\n codegen = gm.graph._codegen\n gm.graph._codegen = torch.fx.graph.CodeGen()\n gm.recompile()\n\n compiled_fn = compile_gm(gm, codegen.process_inputs(*inputs))\n\n @functools.wraps(compiled_fn)\n def wrapper(*args):\n return codegen.process_outputs(compiled_fn(*codegen.process_inputs(*args)))\n\n return wrapper\n", "sub_path": "torch/_inductor/compile_fx.py", "file_name": "compile_fx.py", "file_ext": "py", "file_size_in_byte": 41624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "functools.wraps", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 59, "usage_type": "call"}, {"api_name": "torch._functorch.config._logging.getArtifactLogger", "line_number": 60, "usage_type": "call"}, {"api_name": "torch._functorch.config._logging", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 60, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 64, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 81, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 93, "usage_type": "name"}, {"api_name": "torch._functorch.config.ops.aten.slice", "line_number": 100, "usage_type": "call"}, {"api_name": "torch._functorch.config.ops", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 100, "usage_type": "name"}, {"api_name": "torch._functorch.config._debug_has_internal_overlap", "line_number": 108, "usage_type": "call"}, {"api_name": "torch._functorch.config", "line_number": 108, "usage_type": "name"}, {"api_name": "torch._dynamo.logging.get_step_logger", "line_number": 123, "usage_type": "call"}, {"api_name": "torch._dynamo.logging", "line_number": 123, "usage_type": "name"}, {"api_name": "functools.lru_cache", "line_number": 121, "usage_type": 
"call"}, {"api_name": "torch._functorch.config.cuda.is_available", "line_number": 129, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 129, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 129, "usage_type": "name"}, {"api_name": "torch._functorch.config.backends", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 130, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.get_device_capability", "line_number": 131, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 131, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 131, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 133, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 126, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 139, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 139, "usage_type": "name"}, {"api_name": "torch._functorch.config.ops", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 140, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 151, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 151, "usage_type": "name"}, {"api_name": "torch._functorch.config.float32", "line_number": 152, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 152, "usage_type": "name"}, {"api_name": "graph.GraphLowering", "line_number": 163, "usage_type": "call"}, {"api_name": "virtualized.V.set_graph_handler", "line_number": 164, "usage_type": "call"}, {"api_name": "virtualized.V", "line_number": 164, "usage_type": "name"}, {"api_name": "graph.run", "line_number": 165, "usage_type": "call"}, {"api_name": "graph.count_bytes", "line_number": 166, "usage_type": "call"}, {"api_name": "torch._functorch.aot_autograd.make_boxed_func", "line_number": 169, "usage_type": "call"}, {"api_name": "debug.DebugContext.wrap", "line_number": 159, "usage_type": "attribute"}, {"api_name": "debug.DebugContext", "line_number": 159, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 174, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 174, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 174, "usage_type": "attribute"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 185, "usage_type": "name"}, {"api_name": "pattern_matcher.clone_graph", "line_number": 207, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 209, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 209, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 210, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 210, "usage_type": "name"}, {"api_name": "torch._functorch.config.SymInt", "line_number": 213, "usage_type": "attribute"}, 
{"api_name": "torch._functorch.config", "line_number": 213, "usage_type": "name"}, {"api_name": "torch._functorch.config.SymFloat", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch._subclasses.fake_tensor.FakeTensor", "line_number": 217, "usage_type": "argument"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 220, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 220, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 220, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 226, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 226, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 226, "usage_type": "name"}, {"api_name": "virtualized.V.real_inputs", "line_number": 229, "usage_type": "attribute"}, {"api_name": "virtualized.V", "line_number": 229, "usage_type": "name"}, {"api_name": "torch._functorch.config.utils._python_dispatch._disable_current_modes", "line_number": 233, "usage_type": "call"}, {"api_name": "torch._functorch.config.utils", "line_number": 233, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 233, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 173, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 246, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 246, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 247, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 247, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 247, "usage_type": "name"}, {"api_name": "torch._dynamo.utils.detect_fake_mode", "line_number": 255, "usage_type": "call"}, {"api_name": "torch._functorch.config._subclasses.FakeTensorMode", "line_number": 257, "usage_type": "call"}, {"api_name": "torch._functorch.config._subclasses", "line_number": 257, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 257, "usage_type": "name"}, {"api_name": "torch.fx.passes.fake_tensor_prop.FakeTensorProp", "line_number": 258, "usage_type": "call"}, {"api_name": "contextlib.nullcontext", "line_number": 261, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 263, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 263, "usage_type": "attribute"}, {"api_name": "torch.fx.passes.fake_tensor_prop.FakeTensorProp", "line_number": 266, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 277, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 278, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 278, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 278, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 279, "usage_type": "name"}, {"api_name": "torch._dynamo.utils.count_calls", "line_number": 290, "usage_type": "call"}, {"api_name": "torch._dynamo.utils", "line_number": 290, "usage_type": "name"}, {"api_name": "torch._functorch.aot_autograd.make_boxed_func", "line_number": 291, "usage_type": "call"}, {"api_name": "torch._inductor.codecache.CompiledFxGraph", "line_number": 310, "usage_type": "name"}, 
{"api_name": "torch._functorch.config.fx", "line_number": 322, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 322, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 329, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 329, "usage_type": "name"}, {"api_name": "utils.has_incompatible_cudagraph_ops", "line_number": 344, "usage_type": "call"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 348, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 348, "usage_type": "name"}, {"api_name": "torch._functorch.config.SymInt", "line_number": 348, "usage_type": "attribute"}, {"api_name": "torch._functorch.config.SymInt", "line_number": 366, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 366, "usage_type": "name"}, {"api_name": "torch._functorch.config._inductor.cudagraph_trees.get_manager", "line_number": 395, "usage_type": "call"}, {"api_name": "torch._functorch.config._inductor", "line_number": 395, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 395, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 437, "usage_type": "attribute"}, {"api_name": "debug.DebugContext.wrap", "line_number": 273, "usage_type": "attribute"}, {"api_name": "debug.DebugContext", "line_number": 273, "usage_type": "name"}, {"api_name": "torch._functorch.config.utils._python_dispatch._disable_current_modes", "line_number": 274, "usage_type": "call"}, {"api_name": "torch._functorch.config.utils", "line_number": 274, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 274, "usage_type": "name"}, {"api_name": "torch._inductor.fb.utils.time_and_log", "line_number": 275, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 449, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 449, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 450, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 450, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 450, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 451, "usage_type": "name"}, {"api_name": "sys.setrecursionlimit", "line_number": 466, "usage_type": "call"}, {"api_name": "sys.getrecursionlimit", "line_number": 466, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 469, "usage_type": "attribute"}, {"api_name": "virtualized.V.debug.fx_graph", "line_number": 474, "usage_type": "call"}, {"api_name": "virtualized.V.debug", "line_number": 474, "usage_type": "attribute"}, {"api_name": "virtualized.V", "line_number": 474, "usage_type": "name"}, {"api_name": "fx_passes.post_grad.view_to_reshape", "line_number": 494, "usage_type": "call"}, {"api_name": "virtualized.V.set_fake_mode", "line_number": 502, "usage_type": "call"}, {"api_name": "virtualized.V", "line_number": 502, "usage_type": "name"}, {"api_name": "fx_passes.post_grad.post_grad_passes", "line_number": 504, "usage_type": "call"}, {"api_name": "virtualized.V.debug.fx_graph_transformed", "line_number": 505, "usage_type": "call"}, {"api_name": "virtualized.V.debug", "line_number": 505, "usage_type": "attribute"}, {"api_name": "virtualized.V", "line_number": 505, "usage_type": "name"}, {"api_name": "virtualized.V.set_fake_mode", "line_number": 507, "usage_type": "call"}, {"api_name": "virtualized.V", 
"line_number": 507, "usage_type": "name"}, {"api_name": "graph.GraphLowering", "line_number": 508, "usage_type": "call"}, {"api_name": "virtualized.V.set_graph_handler", "line_number": 517, "usage_type": "call"}, {"api_name": "virtualized.V", "line_number": 517, "usage_type": "name"}, {"api_name": "graph.run", "line_number": 518, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 519, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 519, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 519, "usage_type": "name"}, {"api_name": "graph.graph_outputs", "line_number": 523, "usage_type": "attribute"}, {"api_name": "virtualized.V.graph.sizevars.size_hint", "line_number": 527, "usage_type": "call"}, {"api_name": "virtualized.V.graph", "line_number": 527, "usage_type": "attribute"}, {"api_name": "virtualized.V", "line_number": 527, "usage_type": "name"}, {"api_name": "graph.compile_to_fn", "line_number": 532, "usage_type": "call"}, {"api_name": "graph.disable_cudagraphs", "line_number": 534, "usage_type": "attribute"}, {"api_name": "torch._inductor.codecache.CompiledFxGraph", "line_number": 537, "usage_type": "call"}, {"api_name": "graph.cache_key", "line_number": 539, "usage_type": "attribute"}, {"api_name": "graph.cache_path", "line_number": 540, "usage_type": "attribute"}, {"api_name": "graph.cache_linemap", "line_number": 541, "usage_type": "attribute"}, {"api_name": "graph.device_types", "line_number": 542, "usage_type": "attribute"}, {"api_name": "graph.device_idxs", "line_number": 543, "usage_type": "attribute"}, {"api_name": "graph.mutated_inputs", "line_number": 544, "usage_type": "attribute"}, {"api_name": "graph.mutated_input_idxs", "line_number": 545, "usage_type": "attribute"}, {"api_name": "torch._inductor.codecache.CompiledFxGraph", "line_number": 460, "usage_type": "name"}, {"api_name": "torch._functorch.config.as_strided", "line_number": 554, "usage_type": "call"}, {"api_name": "torch._functorch.config", "line_number": 554, "usage_type": "name"}, {"api_name": "torch._functorch.config.as_strided", "line_number": 555, "usage_type": "call"}, {"api_name": "torch._functorch.config", "line_number": 555, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 558, "usage_type": "name"}, {"api_name": "utils.get_dtype_size", "line_number": 566, "usage_type": "call"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 571, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 571, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 564, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 580, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 603, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 603, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 612, "usage_type": "call"}, {"api_name": "torch._inductor.cudagraph_trees.cudagraphify_impl", "line_number": 613, "usage_type": "argument"}, {"api_name": "torch._subclasses.fake_tensor.FakeTensor", "line_number": 623, "usage_type": "argument"}, {"api_name": "torch._dynamo.utils.preserve_rng_state", "line_number": 631, "usage_type": "call"}, {"api_name": "torch._dynamo.utils", "line_number": 631, "usage_type": "name"}, {"api_name": "torch._dynamo.utils.dynamo_timed", "line_number": 596, "usage_type": "attribute"}, {"api_name": "torch._dynamo.utils", "line_number": 596, 
"usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 646, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 646, "usage_type": "name"}, {"api_name": "torch._functorch.config.empty", "line_number": 663, "usage_type": "call"}, {"api_name": "torch._functorch.config", "line_number": 663, "usage_type": "name"}, {"api_name": "torch._functorch.config.as_strided", "line_number": 664, "usage_type": "call"}, {"api_name": "torch._functorch.config", "line_number": 664, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 692, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 692, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 701, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 701, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.synchronize", "line_number": 705, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 705, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 705, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.Stream", "line_number": 706, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 706, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 706, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.current_stream", "line_number": 707, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 707, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 707, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.stream", "line_number": 709, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 709, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 709, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.current_stream", "line_number": 712, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 712, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 712, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.synchronize", "line_number": 713, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 713, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 713, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.CUDAGraph", "line_number": 716, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 716, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 716, "usage_type": "name"}, {"api_name": "torch._functorch.config.cuda.graph", "line_number": 717, "usage_type": "call"}, {"api_name": "torch._functorch.config.cuda", "line_number": 717, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 717, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 729, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 729, "usage_type": "name"}, {"api_name": "graph.replay", "line_number": 739, "usage_type": "call"}, {"api_name": "graph.replay", "line_number": 754, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 760, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", 
"line_number": 760, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 788, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 788, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 789, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 789, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 789, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 791, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 791, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 791, "usage_type": "name"}, {"api_name": "torch._inductor.codecache.code_hash", "line_number": 804, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 807, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 807, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 811, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 816, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 820, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 820, "usage_type": "name"}, {"api_name": "fx_passes.joint_graph.joint_graph_passes", "line_number": 832, "usage_type": "call"}, {"api_name": "graph.GraphLowering.decide_layout_opt", "line_number": 834, "usage_type": "call"}, {"api_name": "graph.GraphLowering", "line_number": 834, "usage_type": "name"}, {"api_name": "torch._inductor.freezing.convert_conv_weights_to_channels_last", "line_number": 838, "usage_type": "call"}, {"api_name": "torch._inductor.freezing.freeze", "line_number": 840, "usage_type": "call"}, {"api_name": "torch._dynamo.utils.detect_fake_mode", "line_number": 849, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 855, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 855, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 859, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 859, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 859, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 864, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 864, "usage_type": "attribute"}, {"api_name": "torch._functorch.config.fx", "line_number": 893, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 893, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 894, "usage_type": "name"}, {"api_name": "torch._functorch.config.Tensor", "line_number": 894, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 894, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 896, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 896, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 896, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 897, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 897, "usage_type": "name"}, {"api_name": "torch._ops.OpOverload", "line_number": 897, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 897, "usage_type": "name"}, {"api_name": "virtualized.V.set_real_inputs", "line_number": 919, "usage_type": "call"}, {"api_name": "virtualized.V", "line_number": 
919, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 927, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 940, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 940, "usage_type": "name"}, {"api_name": "fx.graph._PyTreeCodeGen", "line_number": 941, "usage_type": "argument"}, {"api_name": "fx_passes.pre_grad.pre_grad_passes", "line_number": 951, "usage_type": "call"}, {"api_name": "decomposition.select_decomp_table", "line_number": 968, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 972, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 972, "usage_type": "name"}, {"api_name": "fx_passes.joint_graph.joint_graph_passes", "line_number": 975, "usage_type": "call"}, {"api_name": "torch._functorch.config.functionalize_rng_ops", "line_number": 977, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 977, "usage_type": "name"}, {"api_name": "torch.utils._pytree.tree_flatten", "line_number": 984, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 984, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 987, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 987, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 987, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 989, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 989, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 989, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 994, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 994, "usage_type": "name"}, {"api_name": "torch.utils._pytree.tree_flatten", "line_number": 997, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 997, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 1025, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1025, "usage_type": "name"}, {"api_name": "torch._dynamo.utils.dynamo_timed", "line_number": 971, "usage_type": "attribute"}, {"api_name": "torch._dynamo.utils", "line_number": 971, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 1039, "usage_type": "call"}, {"api_name": "torch._functorch.config.is_grad_enabled", "line_number": 1041, "usage_type": "call"}, {"api_name": "torch._functorch.config", "line_number": 1041, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 1042, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 1052, "usage_type": "call"}, {"api_name": "fx_passes.joint_graph.joint_graph_passes", "line_number": 1055, "usage_type": "call"}, {"api_name": "functorch.compile.min_cut_rematerialization_partition", "line_number": 1056, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 1061, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1061, "usage_type": "name"}, {"api_name": "torch._dynamo.utils.dynamo_timed", "line_number": 1060, "usage_type": "attribute"}, {"api_name": "torch._dynamo.utils", "line_number": 1060, "usage_type": "name"}, {"api_name": "torch._dynamo.utils.detect_fake_mode", "line_number": 1076, "usage_type": "call"}, 
{"api_name": "torch._functorch.config._subclasses.FakeTensorMode", "line_number": 1076, "usage_type": "call"}, {"api_name": "torch._functorch.config._subclasses", "line_number": 1076, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1076, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext.get", "line_number": 1080, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 1080, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1080, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.TracingContext", "line_number": 1080, "usage_type": "call"}, {"api_name": "virtualized.V.set_fake_mode", "line_number": 1082, "usage_type": "call"}, {"api_name": "virtualized.V", "line_number": 1082, "usage_type": "name"}, {"api_name": "torch._functorch.config._guards.tracing", "line_number": 1082, "usage_type": "call"}, {"api_name": "torch._functorch.config._guards", "line_number": 1082, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1082, "usage_type": "name"}, {"api_name": "torch._dynamo.compiled_autograd.disable", "line_number": 1084, "usage_type": "call"}, {"api_name": "torch._dynamo.compiled_autograd", "line_number": 1084, "usage_type": "name"}, {"api_name": "_dynamo.backends.common.aot_autograd", "line_number": 1085, "usage_type": "call"}, {"api_name": "torch._dynamo.utils.detect_fake_mode", "line_number": 1103, "usage_type": "call"}, {"api_name": "torch._functorch.config.SymInt", "line_number": 1115, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1115, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 1122, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1122, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 1129, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1129, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 1131, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1131, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 1137, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1137, "usage_type": "name"}, {"api_name": "torch._functorch.config.fx", "line_number": 1147, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1147, "usage_type": "name"}, {"api_name": "torch.utils._pytree.tree_flatten", "line_number": 1154, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 1154, "usage_type": "name"}, {"api_name": "torch.utils._pytree.tree_unflatten", "line_number": 1164, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 1164, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 1162, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 1169, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1169, "usage_type": "name"}, {"api_name": "torch.utils._pytree.tree_flatten", "line_number": 1175, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 1175, "usage_type": "name"}, {"api_name": "torch._functorch.config.nn", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1177, "usage_type": "name"}, {"api_name": 
"torch.utils._pytree.tree_unflatten", "line_number": 1183, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 1183, "usage_type": "name"}, {"api_name": "torch.utils._pytree.tree_flatten", "line_number": 1190, "usage_type": "call"}, {"api_name": "torch.utils._pytree", "line_number": 1190, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 1187, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx.graph.CodeGen", "line_number": 1201, "usage_type": "call"}, {"api_name": "torch._functorch.config.fx", "line_number": 1201, "usage_type": "attribute"}, {"api_name": "torch._functorch.config", "line_number": 1201, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 1206, "usage_type": "call"}]} +{"seq_id": "350580415", "text": "# ===================================================================\n#\n# Copyright (c) 2015, Legrandin <helderijs@gmail.com>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n# ===================================================================\n\nimport unittest\nimport time\nfrom Cryptodome.SelfTest.st_common import list_test_cases\nfrom Cryptodome.SelfTest.loader import load_tests\n\nfrom Cryptodome.PublicKey import ECC\nfrom Cryptodome.PublicKey.ECC import EccPoint, _curve, EccKey\n\nclass TestEccPoint_NIST(unittest.TestCase):\n \"\"\"Tests defined in section 4.3 of https://www.nsa.gov/ia/_files/nist-routines.pdf\"\"\"\n\n pointS = EccPoint(\n 0xde2444bebc8d36e682edd27e0f271508617519b3221a8fa0b77cab3989da97c9,\n 0xc093ae7ff36e5380fc01a5aad1e66659702de80f53cec576b6350b243042a256)\n\n pointT = EccPoint(\n 0x55a8b00f8da1d44e62f6b3b25316212e39540dc861c89575bb8cf92e35e0986b,\n 0x5421c3209c2d6c704835d82ac4c3dd90f61a8a52598b9e7ab656e9d8c8b24316)\n\n def test_set(self):\n pointW = EccPoint(0, 0)\n pointW.set(self.pointS)\n self.assertEqual(pointW, self.pointS)\n\n def test_copy(self):\n pointW = self.pointS.copy()\n self.assertEqual(pointW, self.pointS)\n pointW.set(self.pointT)\n self.assertEqual(pointW, self.pointT)\n self.assertNotEqual(self.pointS, self.pointT)\n\n def test_addition(self):\n pointRx = 0x72b13dd4354b6b81745195e98cc5ba6970349191ac476bd4553cf35a545a067e\n pointRy = 
0x8d585cbb2e1327d75241a8a122d7620dc33b13315aa5c9d46d013011744ac264\n\n        pointR = self.pointS + self.pointT\n        self.assertEqual(pointR.x, pointRx)\n        self.assertEqual(pointR.y, pointRy)\n\n        pai = EccPoint.point_at_infinity()\n\n        # S + 0\n        pointR = self.pointS + pai\n        self.assertEqual(pointR, self.pointS)\n\n        # 0 + S\n        pointR = pai + self.pointS\n        self.assertEqual(pointR, self.pointS)\n\n        # 0 + 0\n        pointR = pai + pai\n        self.assertEqual(pointR, pai)\n\n    def test_inplace_addition(self):\n        pointRx = 0x72b13dd4354b6b81745195e98cc5ba6970349191ac476bd4553cf35a545a067e\n        pointRy = 0x8d585cbb2e1327d75241a8a122d7620dc33b13315aa5c9d46d013011744ac264\n\n        pointR = self.pointS.copy()\n        pointR += self.pointT\n        self.assertEqual(pointR.x, pointRx)\n        self.assertEqual(pointR.y, pointRy)\n\n        pai = EccPoint.point_at_infinity()\n\n        # S + 0\n        pointR = self.pointS.copy()\n        pointR += pai\n        self.assertEqual(pointR, self.pointS)\n\n        # 0 + S\n        pointR = pai.copy()\n        pointR += self.pointS\n        self.assertEqual(pointR, self.pointS)\n\n        # 0 + 0\n        pointR = pai.copy()\n        pointR += pai\n        self.assertEqual(pointR, pai)\n\n    def test_doubling(self):\n        pointRx = 0x7669e6901606ee3ba1a8eef1e0024c33df6c22f3b17481b82a860ffcdb6127b0\n        pointRy = 0xfa878162187a54f6c39f6ee0072f33de389ef3eecd03023de10ca2c1db61d0c7\n\n        pointR = self.pointS.copy()\n        pointR.double()\n        self.assertEqual(pointR.x, pointRx)\n        self.assertEqual(pointR.y, pointRy)\n\n        # 2*0\n        pai = self.pointS.point_at_infinity()\n        pointR = pai.copy()\n        pointR.double()\n        self.assertEqual(pointR, pai)\n\n        # S + S\n        pointR = self.pointS.copy()\n        pointR += pointR\n        self.assertEqual(pointR.x, pointRx)\n        self.assertEqual(pointR.y, pointRy)\n\n    def test_scalar_multiply(self):\n        d = 0xc51e4753afdec1e6b6c6a5b992f43f8dd0c7a8933072708b6522468b2ffb06fd\n        pointRx = 0x51d08d5f2d4278882946d88d83c97d11e62becc3cfc18bedacc89ba34eeca03f\n        pointRy = 0x75ee68eb8bf626aa5b673ab51f6e744e06f8fcf8a6c0cf3035beca956a7b41d5\n\n        pointR = self.pointS * d\n        self.assertEqual(pointR.x, pointRx)\n        self.assertEqual(pointR.y, pointRy)\n\n        # 0*S\n        pai = self.pointS.point_at_infinity()\n        pointR = self.pointS * 0\n        self.assertEqual(pointR, pai)\n\n        # -1*S\n        self.assertRaises(ValueError, lambda: self.pointS * -1)\n\n    def test_joint_scalar_multiply(self):\n        d = 0xc51e4753afdec1e6b6c6a5b992f43f8dd0c7a8933072708b6522468b2ffb06fd\n        e = 0xd37f628ece72a462f0145cbefe3f0b355ee8332d37acdd83a358016aea029db7\n        pointRx = 0xd867b4679221009234939221b8046245efcf58413daacbeff857b8588341f6b8\n        pointRy = 0xf2504055c03cede12d22720dad69c745106b6607ec7e50dd35d54bd80f615275\n\n        pointR = self.pointS * d + self.pointT * e\n        self.assertEqual(pointR.x, pointRx)\n        self.assertEqual(pointR.y, pointRy)\n\n\nclass TestEccPoint_PAI(unittest.TestCase):\n    \"\"\"Test vectors from http://point-at-infinity.org/ecc/nisttv\"\"\"\n\n    pointG = EccPoint(_curve.Gx, _curve.Gy)\n\n\ntv_pai = load_tests((\"Cryptodome\", \"SelfTest\", \"PublicKey\", \"test_vectors\", \"ECC\"),\n                    \"point-at-infinity.org-P256.txt\",\n                    \"P-256 tests from point-at-infinity.org\",\n                    { \"k\" : lambda k: int(k),\n                      \"x\" : lambda x: int(x, 16),\n                      \"y\" : lambda y: int(y, 16)} )\nassert(tv_pai)\nfor tv in tv_pai:\n    def new_test(self, scalar=tv.k, x=tv.x, y=tv.y):\n        result = self.pointG * scalar\n        self.assertEqual(result.x, x)\n        self.assertEqual(result.y, y)\n    setattr(TestEccPoint_PAI, \"test_%d\" % tv.count, new_test)\n\n\nclass TestEccKey(unittest.TestCase):\n\n    def test_private_key(self):\n\n        key = EccKey(curve=\"P-256\", d=1)\n        self.assertEqual(key.d, 1)\n        self.failUnless(key.has_private())\n        
self.assertEqual(key.pointQ.x, _curve.Gx)\n self.assertEqual(key.pointQ.y, _curve.Gy)\n\n point = EccPoint(_curve.Gx, _curve.Gy)\n key = EccKey(curve=\"P-256\", d=1, point=point)\n self.assertEqual(key.d, 1)\n self.failUnless(key.has_private())\n self.assertEqual(key.pointQ, point)\n\n # Other names\n key = EccKey(curve=\"secp256r1\", d=1)\n key = EccKey(curve=\"prime256v1\", d=1)\n\n def test_public_key(self):\n\n point = EccPoint(_curve.Gx, _curve.Gy)\n key = EccKey(curve=\"P-256\", point=point)\n self.failIf(key.has_private())\n self.assertEqual(key.pointQ, point)\n\n def test_public_key_derived(self):\n\n priv_key = EccKey(curve=\"P-256\", d=3)\n pub_key = priv_key.public_key()\n self.failIf(pub_key.has_private())\n self.assertEqual(priv_key.pointQ, pub_key.pointQ)\n\n def test_invalid_curve(self):\n self.assertRaises(ValueError, lambda: EccKey(curve=\"P-257\", d=1))\n\n def test_invalid_d(self):\n self.assertRaises(ValueError, lambda: EccKey(curve=\"P-256\", d=0))\n self.assertRaises(ValueError, lambda: EccKey(curve=\"P-256\", d=_curve.order))\n\n def test_equality(self):\n\n private_key = ECC.construct(d=3, curve=\"P-256\")\n private_key2 = ECC.construct(d=3, curve=\"P-256\")\n private_key3 = ECC.construct(d=4, curve=\"P-256\")\n\n public_key = private_key.public_key()\n public_key2 = private_key2.public_key()\n public_key3 = private_key3.public_key()\n\n self.assertEqual(private_key, private_key2)\n self.assertNotEqual(private_key, private_key3)\n\n self.assertEqual(public_key, public_key2)\n self.assertNotEqual(public_key, public_key3)\n\n self.assertNotEqual(public_key, private_key)\n\n\nclass TestEccModule(unittest.TestCase):\n\n def test_generate(self):\n\n key = ECC.generate(curve=\"P-256\")\n self.failUnless(key.has_private())\n self.assertEqual(key.pointQ, EccPoint(_curve.Gx, _curve.Gy) * key.d)\n\n # Other names\n ECC.generate(curve=\"secp256r1\")\n ECC.generate(curve=\"prime256v1\")\n\n def test_construct(self):\n\n key = ECC.construct(curve=\"P-256\", d=1)\n self.failUnless(key.has_private())\n self.assertEqual(key.pointQ, _curve.G)\n\n key = ECC.construct(curve=\"P-256\", point_x=_curve.Gx, point_y=_curve.Gy)\n self.failIf(key.has_private())\n self.assertEqual(key.pointQ, _curve.G)\n\n # Other names\n ECC.construct(curve=\"secp256r1\", d=1)\n ECC.construct(curve=\"prime256v1\", d=1)\n\n def test_negative_construct(self):\n coord = dict(point_x=10, point_y=4)\n coordG = dict(point_x=_curve.Gx, point_y=_curve.Gy)\n\n self.assertRaises(ValueError, ECC.construct, curve=\"P-256\", **coord)\n self.assertRaises(ValueError, ECC.construct, curve=\"P-256\", d=2, **coordG)\n\n\ndef get_tests(config={}):\n tests = []\n tests += list_test_cases(TestEccPoint_NIST)\n tests += list_test_cases(TestEccPoint_PAI)\n tests += list_test_cases(TestEccKey)\n tests += list_test_cases(TestEccModule)\n return tests\n\nif __name__ == '__main__':\n suite = lambda: unittest.TestSuite(get_tests())\n unittest.main(defaultTest='suite')\n", "sub_path": "renthouse/macvenv/lib/python3.7/site-packages/Cryptodome/SelfTest/PublicKey/test_ECC.py", "file_name": "test_ECC.py", "file_ext": "py", "file_size_in_byte": 10054, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 42, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 46, "usage_type": "call"}, {"api_name": 
"Cryptodome.PublicKey.ECC.EccPoint", "line_number": 51, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint.point_at_infinity", "line_number": 70, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 70, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint.point_at_infinity", "line_number": 93, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 93, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 159, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 162, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 162, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 162, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 162, "usage_type": "attribute"}, {"api_name": "Cryptodome.SelfTest.loader.load_tests", "line_number": 165, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 180, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 184, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 187, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 187, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 188, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 188, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 190, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 190, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 190, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 190, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 191, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 197, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 198, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 202, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 202, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 202, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 202, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 203, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 209, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 215, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 218, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC.EccKey", "line_number": 219, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.order", "line_number": 219, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 219, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 223, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 223, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 224, "usage_type": "call"}, 
{"api_name": "Cryptodome.PublicKey.ECC", "line_number": 224, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 225, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 225, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 240, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.generate", "line_number": 244, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 244, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.EccPoint", "line_number": 246, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 246, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 246, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 246, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.generate", "line_number": 249, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 249, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.generate", "line_number": 250, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 250, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 254, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 254, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.G", "line_number": 256, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 256, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 258, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 258, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 258, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 258, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 258, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.G", "line_number": 260, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 260, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 263, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 263, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 264, "usage_type": "call"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 264, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gx", "line_number": 268, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC._curve", "line_number": 268, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC._curve.Gy", "line_number": 268, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 270, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 270, "usage_type": "name"}, {"api_name": "Cryptodome.PublicKey.ECC.construct", "line_number": 271, "usage_type": "attribute"}, {"api_name": "Cryptodome.PublicKey.ECC", "line_number": 271, "usage_type": "name"}, {"api_name": "Cryptodome.SelfTest.st_common.list_test_cases", "line_number": 276, "usage_type": "call"}, {"api_name": "Cryptodome.SelfTest.st_common.list_test_cases", "line_number": 277, "usage_type": "call"}, {"api_name": 
"Cryptodome.SelfTest.st_common.list_test_cases", "line_number": 278, "usage_type": "call"}, {"api_name": "Cryptodome.SelfTest.st_common.list_test_cases", "line_number": 279, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 283, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "21297774", "text": "import datetime\r\nimport os\r\nimport time\r\nimport sys\r\nimport PyQt5\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QLineEdit, QLabel, QCheckBox, QTextEdit, QStatusBar, \\\r\n QProgressBar, QSizePolicy, QAbstractItemView, QWidget, QTabWidget, QHBoxLayout, QVBoxLayout, QSlider\r\nfrom PyQt5.QtGui import QPixmap, QIcon, QFont, QTextCursor, QPalette, QImage, QBrush, QImage\r\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot, QObject, QSize\r\nimport RPi.GPIO as GPIO\r\n\r\n\r\n# imports from user made files\r\nfrom GUI_Stylesheets import GUI_Stylesheets\r\nfrom GUI_Buttons import Send_Command_Button, Logo_Button, Relay_1_40_Ohm, Relay_1_60_Ohm, Relay_1_500_Ohm, Relay_1_1k_Ohm, \\\r\n Relay_2_40_Ohm, Relay_2_60_Ohm, Relay_2_500_Ohm, Relay_2_1k_Ohm\r\nfrom buttonHandler import handlers\r\nfrom GPIO_thread1 import GPIO_Ch1_Thread\r\nfrom GPIO_thread2 import GPIO_Ch2_Thread\r\n\r\n# Current version of application - Update for new builds\r\nappVersion = \"1.0\" # Update version\r\n\r\n# Icon Image locations\r\nMain_path = os.getcwd() + \"/\"\r\nIcon_Path = Main_path + \"/Logo/logo.png\"\r\nMediatech_Path = Main_path + \"/Logo/Medicatech.png\"\r\n\r\n# Instantiate style sheets for GUI Objects\r\nGUI_Style = GUI_Stylesheets()\r\n\r\n#BCM chip pinout\r\nRelay1_40 = 7\r\nRelay1_60 = 12\r\nRelay1_500 = 16\r\nRelay1_1k = 20\r\nRelay2_40 = 21\r\nRelay2_60 = 13\r\nRelay2_500 = 19\r\nRelay2_1k = 26\r\n\r\n\r\n# --------------------------------------------------------------------------------------------------------------\r\n# --------------------------------- Main Window Class ----------------------------------------------------------\r\n# --------------------------------------------------------------------------------------------------------------\r\nclass Window(QMainWindow):\r\n\r\n # Initialization of the GUI\r\n def __init__(self):\r\n super(Window, self).__init__()\r\n # ~ self.setGeometry(50, 50, 1100, 750)\r\n self.setWindowTitle(\"Battery TF\")\r\n self.setStyleSheet(GUI_Style.mainWindow)\r\n # ~ self.setMinimumSize(1100, 750)\r\n self.setWindowIcon(QIcon(Icon_Path))\r\n\r\n # --------------------------------------------------------------\r\n # -------------------- Initialize -----------------------------\r\n # --------------------------------------------------------------\r\n\t# GPIO Configuration\r\n GPIO.setmode(GPIO.BCM)\r\n GPIO.setup(Relay1_40, GPIO.OUT)\r\n GPIO.setup(Relay1_60, GPIO.OUT)\r\n GPIO.setup(Relay1_500, GPIO.OUT)\r\n GPIO.setup(Relay1_1k, GPIO.OUT)\r\n GPIO.setup(Relay2_40, GPIO.OUT)\r\n GPIO.setup(Relay2_60, GPIO.OUT)\r\n GPIO.setup(Relay2_500, GPIO.OUT)\r\n GPIO.setup(Relay2_1k, GPIO.OUT)\r\n\r\n # --------------------------------------------------------------\r\n # ---------------- Instantiate All Threads --------------------\r\n # --------------------------------------------------------------\r\n self.GPIO_ch1 = GPIO_Ch1_Thread(GPIO)\r\n self.GPIO_ch2 = GPIO_Ch2_Thread(GPIO)\r\n\r\n # --------------------------------------------------------------\r\n # ---------------- Start All Threads ---------------------------\r\n # 
--------------------------------------------------------------\r\n self.GPIO_ch1.start()\r\n self.GPIO_ch2.start()\r\n \r\n self.GPIO_ch1.setAllLow(True)\r\n self.GPIO_ch2.setAllLow(True)\r\n\r\n # --------------------------------------------------------------\r\n # ---------------- Create Main Widget --------------------------\r\n # --------------------------------------------------------------\r\n main_widget = QWidget()\r\n self.setCentralWidget(main_widget)\r\n\r\n # --------------------------------------------------------------\r\n # ---------------- Create Tabs ---------------------------------\r\n # --------------------------------------------------------------\r\n self.tabWidget()\r\n\r\n # --------------------------------------------------------------\r\n # -------------- Create Bottom Status Bar-----------------------\r\n # --------------------------------------------------------------\r\n self.StatusBar()\r\n self.setStatusBar(self.statusBar)\r\n\r\n # --------------------------------------------------------------\r\n # ------------- Create Main Window Layouts ---------------------\r\n # --------------------------------------------------------------\r\n # Instantiate GUI objects\r\n self.MainTitle()\r\n \r\n self.Console_Log()\r\n self.MainLogoButton()\r\n self.inputCommandPrompt()\r\n self.sendCommandButton()\r\n\r\n # Add title/ logo to the main title layout\r\n Main_Title_Layout = QHBoxLayout()\r\n Main_Title_Layout.addWidget(self.Logo_btn, 0, Qt.AlignRight)\r\n Main_Title_Layout.addWidget(self.MainTitleText, 0, Qt.AlignLeft)\r\n Main_Title_Layout.setSpacing(20)\r\n Main_Title_Layout.setContentsMargins(0, 0, 50, 0)\r\n\r\n # Layout command prompt and send button\r\n promptLayout = QHBoxLayout()\r\n promptLayout.addWidget(self.commandPrompt)\r\n promptLayout.addWidget(self.send_btn)\r\n promptLayout.setSpacing(20)\r\n\r\n # Layout right side of GUI window\r\n commandWindowLayout = QVBoxLayout()\r\n commandWindowLayout.addLayout(promptLayout)\r\n commandWindowLayout.addWidget(self.ConsoleLog)\r\n commandWindowLayout.setSpacing(20)\r\n\r\n # Create Layout for tab widget and console window\r\n horizontalWindow_layout = QHBoxLayout()\r\n horizontalWindow_layout.addWidget(self.MyTabs)\r\n horizontalWindow_layout.addLayout(commandWindowLayout)\r\n horizontalWindow_layout.setSpacing(20)\r\n\r\n # Add tabs and video stream to main window layout\r\n Full_Window_layout = QVBoxLayout()\r\n Full_Window_layout.addLayout(Main_Title_Layout)\r\n Full_Window_layout.addLayout(horizontalWindow_layout)\r\n Full_Window_layout.setSpacing(20)\r\n Full_Window_layout.setContentsMargins(20, 20, 20, 20)\r\n\r\n # --------------------------------------------------------------\r\n # ------------- Create Battery 1 Tab Layout --------------------\r\n # --------------------------------------------------------------\r\n # Instantiate Battery 1 GUI Objects\r\n self.remainingCapaAlarm_1()\r\n self.batteryMode_1()\r\n self.voltage_1()\r\n self.current_1()\r\n self.averageCurrent_1()\r\n self.relativeStatOfCharge_1()\r\n self.absoluteStatOfCharge_1()\r\n self.remainingCapacity_1()\r\n self.fullChargeCapacity_1()\r\n self.runTimeToEmpty_1()\r\n self.averageTimeToEmpty_1()\r\n self.averageTimeToFull_1()\r\n self.chargingCurrent_1()\r\n self.chargingVoltage_1()\r\n self.Battery_Status_1()\r\n self.Cycle_Count_1()\r\n self.Serial_Number_1()\r\n self.Device_Name_1()\r\n self.Cell_Voltage4_1()\r\n self.Cell_Voltage3_1()\r\n self.Cell_Voltage2_1()\r\n self.Cell_Voltage1_1()\r\n \r\n # Arrange Layout to go on Battery 1 
tab\r\n vertical_battery1_LeftLayout = QVBoxLayout()\r\n vertical_battery1_LeftLayout.addLayout(self.battMode_Layout1)\r\n vertical_battery1_LeftLayout.addLayout(self.serialNumLayout1)\r\n vertical_battery1_LeftLayout.addLayout(self.deviceNameLayout1)\r\n vertical_battery1_LeftLayout.addLayout(self.charging_Current_Layout1)\r\n vertical_battery1_LeftLayout.addLayout(self.charging_Voltag_Layout1)\r\n vertical_battery1_LeftLayout.addLayout(self.batteryStatusLayout1)\r\n vertical_battery1_LeftLayout.addLayout(self.cycleCountLayout1)\r\n vertical_battery1_LeftLayout.addLayout(self.cellVoltage4_Layout1)\r\n vertical_battery1_LeftLayout.addLayout(self.cellVoltage3_Layout1)\r\n vertical_battery1_LeftLayout.addLayout(self.cellVoltage2_Layout1)\r\n vertical_battery1_LeftLayout.addLayout(self.cellVoltage1_Layout1)\r\n vertical_battery1_LeftLayout.setSpacing(10)\r\n\r\n vertical_battery1_RightLayout = QVBoxLayout()\r\n vertical_battery1_RightLayout.addLayout(self.remCapAlarm_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.voltage_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.current_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.avgCurr_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.relStateCharge_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.absStateCharge_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.remCap_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.fullCharge_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.runTimeToEmpty_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.avgTimeToEmpty_Layout1)\r\n vertical_battery1_RightLayout.addLayout(self.avgTimeToFull_Layout1)\r\n vertical_battery1_RightLayout.setSpacing(10)\r\n\r\n battery1Tab_layout = QHBoxLayout()\r\n battery1Tab_layout.addLayout(vertical_battery1_LeftLayout)\r\n battery1Tab_layout.addLayout(vertical_battery1_RightLayout)\r\n battery1Tab_layout.setSpacing(12)\r\n\r\n # Add final layout to main tab layout\r\n self.Battery1_Tab.setLayout(battery1Tab_layout)\r\n\r\n # --------------------------------------------------------------\r\n # ------------- Create Battery 2 Tab Layout --------------------\r\n # --------------------------------------------------------------\r\n # Instantiate Battery 2 GUI Objects\r\n self.remainingCapaAlarm_2()\r\n self.batteryMode_2()\r\n self.voltage_2()\r\n self.current_2()\r\n self.averageCurrent_2()\r\n self.relativeStatOfCharge_2()\r\n self.absoluteStatOfCharge_2()\r\n self.remainingCapacity_2()\r\n self.fullChargeCapacity_2()\r\n self.runTimeToEmpty_2()\r\n self.averageTimeToEmpty_2()\r\n self.averageTimeToFull_2()\r\n self.chargingCurrent_2()\r\n self.chargingVoltage_2()\r\n self.Battery_Status_2()\r\n self.Cycle_Count_2()\r\n self.Serial_Number_2()\r\n self.Device_Name_2()\r\n self.Cell_Voltage4_2()\r\n self.Cell_Voltage3_2()\r\n self.Cell_Voltage2_2()\r\n self.Cell_Voltage1_2()\r\n\r\n # Arrange Layout to go on Battery 2 tab\r\n vertical_battery2_LeftLayout = QVBoxLayout()\r\n vertical_battery2_LeftLayout.addLayout(self.battMode_Layout2)\r\n vertical_battery2_LeftLayout.addLayout(self.serialNumLayout2)\r\n vertical_battery2_LeftLayout.addLayout(self.deviceNameLayout2)\r\n vertical_battery2_LeftLayout.addLayout(self.charging_Current_Layout2)\r\n vertical_battery2_LeftLayout.addLayout(self.charging_Voltag_Layout2)\r\n vertical_battery2_LeftLayout.addLayout(self.batteryStatusLayout2)\r\n vertical_battery2_LeftLayout.addLayout(self.cycleCountLayout2)\r\n 
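# per-cell voltage rows follow, ordered from cell 4 down to cell 1\r\n        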
vertical_battery2_LeftLayout.addLayout(self.cellVoltage4_Layout2)\r\n vertical_battery2_LeftLayout.addLayout(self.cellVoltage3_Layout2)\r\n vertical_battery2_LeftLayout.addLayout(self.cellVoltage2_Layout2)\r\n vertical_battery2_LeftLayout.addLayout(self.cellVoltage1_Layout2)\r\n vertical_battery2_LeftLayout.setSpacing(10)\r\n\r\n vertical_battery2_RightLayout = QVBoxLayout()\r\n vertical_battery2_RightLayout.addLayout(self.remCapAlarm_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.voltage_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.current_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.avgCurr_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.relStateCharge_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.absStateCharge_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.remCap_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.fullCharge_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.runTimeToEmpty_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.avgTimeToEmpty_Layout2)\r\n vertical_battery2_RightLayout.addLayout(self.avgTimeToFull_Layout2)\r\n vertical_battery2_RightLayout.setSpacing(10)\r\n\r\n battery2Tab_layout = QHBoxLayout()\r\n battery2Tab_layout.addLayout(vertical_battery2_LeftLayout)\r\n battery2Tab_layout.addLayout(vertical_battery2_RightLayout)\r\n battery2Tab_layout.setSpacing(12)\r\n\r\n # Add final layout to main tab layout\r\n self.Battery2_Tab.setLayout(battery2Tab_layout)\r\n\r\n # --------------------------------------------------------------\r\n # ------------ Add Final Layout to Main Window -----------------\r\n # --------------------------------------------------------------\r\n # Set Main window layout to GUI central Widget\r\n self.centralWidget().setLayout(Full_Window_layout)\r\n self.centralWidget().isWindow()\r\n\r\n\r\n self.handleButtons()\r\n \r\n # Connect Signals\r\n self.GPIO_ch1.doneFlag1.connect(self.handle.ch1Buttons)\r\n self.GPIO_ch2.doneFlag2.connect(self.handle.ch2Buttons)\r\n \r\n # Display GUI Objects\r\n # ~ self.show()\r\n #~ self.showFullScreen()\r\n\r\n self.showMaximized()\r\n # --------------------------------------------------------------------------------------------------------------\r\n # ----------------------------- GUI Objects/ Functions ---------------------------------------------------------\r\n # --------------------------------------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------\r\n # ------------------- Keyboard Key Functions -----------------------\r\n # ------------------------------------------------------------------\r\n def keyPressEvent(self, event):\r\n key = event.key()\r\n\r\n if key == Qt.Key_Return: # Send button\r\n self.send_btn.Un_Click()\r\n # ------------------------------------------------------------------\r\n # ------------------- Handlers -------------------------------------\r\n # ------------------------------------------------------------------\r\n def handleButtons(self):\r\n self.handle = handlers(self.relay1_40, self.relay1_60, self.relay1_500, self.relay1_1k, \r\n self.relay2_40, self.relay2_60, self.relay2_500, self.relay2_1k, \r\n self.GPIO_ch1, self.GPIO_ch2)\r\n \r\n # ------------------------------------------------------------------\r\n # ------------------- Main Title Function --------------------------\r\n # ------------------------------------------------------------------\r\n def MainTitle(self):\r\n 
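# Large static label shown beside the logo at the top of the window.\r\n        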
self.MainTitleText = QLabel(self)\r\n self.MainTitleText.setText(\"Battery Test Fixture\")\r\n self.MainTitleText.setStyleSheet(GUI_Style.mainTitle)\r\n\r\n def MainLogoButton(self):\r\n self.Logo_btn = Logo_Button(self, \"\", self.ConsoleLog)\r\n self.Logo_btn.setStyleSheet(GUI_Style.startButton)\r\n self.Logo_btn.pressed.connect(self.Logo_btn.On_Click)\r\n self.Logo_btn.released.connect(self.Logo_btn.Un_Click)\r\n self.Logo_btn.setIcon(QIcon(Mediatech_Path))\r\n self.Logo_btn.setIconSize(QSize(300, 80))\r\n # ------------------------------------------------------------------\r\n # ------------------- Create Console Log --------------------------\r\n # ------------------------------------------------------------------\r\n def Console_Log(self):\r\n self.ConsoleLog = QTextEdit(self)\r\n #self.ConsoleLog.setMaximumHeight(100)\r\n self.ConsoleLog.setStyleSheet(GUI_Style.consoleLog)\r\n self.ConsoleLog.setPlaceholderText(\"Console Log\")\r\n self.ConsoleLog.setReadOnly(True)\r\n self.ConsoleLog.setLineWrapMode(True)\r\n self.ConsoleLog.setAlignment(Qt.AlignTop)\r\n # ------------------------------------------------------------------\r\n # ---------------- Create Input Command Prompt --------------------\r\n # ------------------------------------------------------------------\r\n def inputCommandPrompt(self):\r\n self.commandPrompt = QLineEdit(self)\r\n self.commandPrompt.setStyleSheet(GUI_Style.commandBox)\r\n self.commandPrompt.setPlaceholderText(\"Enter Command\")\r\n # ------------------------------------------------------------------\r\n # ------------------- Create Send Button --------------------------\r\n # ------------------------------------------------------------------\r\n def sendCommandButton(self):\r\n self.send_btn = Send_Command_Button(self, \"Send\", self.ConsoleLog, self.commandPrompt)\r\n self.send_btn.setMaximumSize(125, 30)\r\n self.send_btn.setMinimumSize(125, 30)\r\n self.send_btn.setStyleSheet(GUI_Style.sendButton)\r\n self.send_btn.pressed.connect(self.send_btn.On_Click)\r\n self.send_btn.released.connect(self.send_btn.Un_Click)\r\n # ------------------------------------------------------------------\r\n # --------------------- Create Tab Widget --------------------------\r\n # ------------------------------------------------------------------\r\n def tabWidget(self):\r\n self.MyTabs = QTabWidget()\r\n self.MyTabs.setStyleSheet(GUI_Style.tabs)\r\n self.MyTabs.setMaximumWidth(500)\r\n\r\n # Create each individual tabs\r\n self.Battery1_Tab = QWidget()\r\n self.Battery2_Tab = QWidget()\r\n\r\n # Add Tabs and Tab Icon to tab widget\r\n self.MyTabs.addTab(self.Battery1_Tab, ' Battery 1')\r\n self.MyTabs.addTab(self.Battery2_Tab, ' Battery 2')\r\n # ------------------------------------------------------------------\r\n # ----------- Create Battery 1 Tab GUI Objects --------------------\r\n # ------------------------------------------------------------------\r\n def remainingCapaAlarm_1(self):\r\n # create label\r\n self.remCapAlarm1 = QLabel(self)\r\n self.remCapAlarm1.setText(\"Remaining Capacity Alarm\")\r\n self.remCapAlarm1.setStyleSheet(GUI_Style.nameLabel)\r\n self.remCapAlarm1.setWordWrap(True)\r\n \r\n # create input box\r\n self.remCapAlarmBox1 = QLineEdit(self)\r\n self.remCapAlarmBox1.setStyleSheet(GUI_Style.updateField)\r\n self.remCapAlarmBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.remCapAlarm_Layout1 = QHBoxLayout()\r\n self.remCapAlarm_Layout1.addWidget(self.remCapAlarm1)\r\n self.remCapAlarm_Layout1.addWidget(self.remCapAlarmBox1)\r\n\r\n\r\n 
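# NOTE (hedged sketch): every metric method in this class repeats the same\r\n    # label / input-box / row-layout boilerplate.  The helper below shows how\r\n    # it could be factored out; it is illustrative only and is not called by\r\n    # the original methods.\r\n    def _metric_row(self, text):\r\n        label = QLabel(self)\r\n        label.setText(text)\r\n        label.setStyleSheet(GUI_Style.nameLabel)\r\n        label.setWordWrap(True)\r\n        box = QLineEdit(self)\r\n        box.setStyleSheet(GUI_Style.updateField)\r\n        box.setMaximumWidth(50)\r\n        row = QHBoxLayout()\r\n        row.addWidget(label)\r\n        row.addWidget(box)\r\n        return label, box, row\r\n\r\n    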
def batteryMode_1(self):\r\n # create label\r\n self.battMode1 = QLabel(self)\r\n self.battMode1.setText(\"Battery Mode\")\r\n self.battMode1.setStyleSheet(GUI_Style.nameLabel)\r\n self.battMode1.setWordWrap(True)\r\n \r\n # create input box\r\n self.battModeBox1 = QLineEdit(self)\r\n self.battModeBox1.setStyleSheet(GUI_Style.updateField)\r\n self.battModeBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.battMode_Layout1 = QHBoxLayout()\r\n self.battMode_Layout1.addWidget(self.battMode1)\r\n self.battMode_Layout1.addWidget(self.battModeBox1)\r\n\r\n def voltage_1(self):\r\n # create label\r\n self.voltage1 = QLabel(self)\r\n self.voltage1.setText(\"Voltage\")\r\n self.voltage1.setStyleSheet(GUI_Style.nameLabel)\r\n self.voltage1.setWordWrap(True)\r\n \r\n # create input box\r\n self.voltageBox1 = QLineEdit(self)\r\n self.voltageBox1.setStyleSheet(GUI_Style.updateField)\r\n self.voltageBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.voltage_Layout1 = QHBoxLayout()\r\n self.voltage_Layout1.addWidget(self.voltage1)\r\n self.voltage_Layout1.addWidget(self.voltageBox1)\r\n\r\n def current_1(self):\r\n # create label\r\n self.current1 = QLabel(self)\r\n self.current1.setText(\"Current\")\r\n self.current1.setStyleSheet(GUI_Style.nameLabel)\r\n self.current1.setWordWrap(True)\r\n \r\n # create input box\r\n self.currentBox1 = QLineEdit(self)\r\n self.currentBox1.setStyleSheet(GUI_Style.updateField)\r\n self.currentBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.current_Layout1 = QHBoxLayout()\r\n self.current_Layout1.addWidget(self.current1)\r\n self.current_Layout1.addWidget(self.currentBox1)\r\n\r\n def averageCurrent_1(self):\r\n # create label\r\n self.avgCurr1 = QLabel(self)\r\n self.avgCurr1.setText(\"Average Current\")\r\n self.avgCurr1.setStyleSheet(GUI_Style.nameLabel)\r\n self.avgCurr1.setWordWrap(True)\r\n \r\n # create input box\r\n self.avgCurrBox1 = QLineEdit(self)\r\n self.avgCurrBox1.setStyleSheet(GUI_Style.updateField)\r\n self.avgCurrBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.avgCurr_Layout1 = QHBoxLayout()\r\n self.avgCurr_Layout1.addWidget(self.avgCurr1)\r\n self.avgCurr_Layout1.addWidget(self.avgCurrBox1)\r\n\r\n def relativeStatOfCharge_1(self):\r\n # create label\r\n self.relStateCharge1 = QLabel(self)\r\n self.relStateCharge1.setText(\"Relative State Of Charge\")\r\n self.relStateCharge1.setStyleSheet(GUI_Style.nameLabel)\r\n self.relStateCharge1.setWordWrap(True)\r\n \r\n # create input box\r\n self.relStateChargeBox1 = QLineEdit(self)\r\n self.relStateChargeBox1.setStyleSheet(GUI_Style.updateField)\r\n self.relStateChargeBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.relStateCharge_Layout1 = QHBoxLayout()\r\n self.relStateCharge_Layout1.addWidget(self.relStateCharge1)\r\n self.relStateCharge_Layout1.addWidget(self.relStateChargeBox1)\r\n\r\n def absoluteStatOfCharge_1(self):\r\n # create label\r\n self.absStateCharge1 = QLabel(self)\r\n self.absStateCharge1.setText(\"Absolute State Of Charge\")\r\n self.absStateCharge1.setStyleSheet(GUI_Style.nameLabel)\r\n self.absStateCharge1.setWordWrap(True)\r\n \r\n # create input box\r\n self.absStateChargeBox1 = QLineEdit(self)\r\n self.absStateChargeBox1.setStyleSheet(GUI_Style.updateField)\r\n self.absStateChargeBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.absStateCharge_Layout1 = QHBoxLayout()\r\n self.absStateCharge_Layout1.addWidget(self.absStateCharge1)\r\n self.absStateCharge_Layout1.addWidget(self.absStateChargeBox1)\r\n\r\n def 
remainingCapacity_1(self):\r\n        # create label\r\n        self.remainingCapacity1 = QLabel(self)\r\n        self.remainingCapacity1.setText(\"Remaining Capacity\")\r\n        self.remainingCapacity1.setStyleSheet(GUI_Style.nameLabel)\r\n        self.remainingCapacity1.setWordWrap(True)\r\n        \r\n        # create input box\r\n        self.remainingCapacityBox1 = QLineEdit(self)\r\n        self.remainingCapacityBox1.setStyleSheet(GUI_Style.updateField)\r\n        self.remainingCapacityBox1.setMaximumWidth(50)\r\n\r\n        # create layout\r\n        self.remCap_Layout1 = QHBoxLayout()\r\n        self.remCap_Layout1.addWidget(self.remainingCapacity1)\r\n        self.remCap_Layout1.addWidget(self.remainingCapacityBox1)\r\n\r\n    def fullChargeCapacity_1(self):\r\n        # create label\r\n        self.fullChargeCapacity1 = QLabel(self)\r\n        self.fullChargeCapacity1.setText(\"Full Charge Capacity\")\r\n        self.fullChargeCapacity1.setStyleSheet(GUI_Style.nameLabel)\r\n        self.fullChargeCapacity1.setWordWrap(True)\r\n        \r\n        # create input box\r\n        self.fullChargeCapacityBox1 = QLineEdit(self)\r\n        self.fullChargeCapacityBox1.setStyleSheet(GUI_Style.updateField)\r\n        self.fullChargeCapacityBox1.setMaximumWidth(50)\r\n\r\n        # create layout\r\n        self.fullCharge_Layout1 = QHBoxLayout()\r\n        self.fullCharge_Layout1.addWidget(self.fullChargeCapacity1)\r\n        self.fullCharge_Layout1.addWidget(self.fullChargeCapacityBox1)\r\n\r\n    def runTimeToEmpty_1(self):\r\n        # create label\r\n        self.runTimeToEmpty1 = QLabel(self)\r\n        self.runTimeToEmpty1.setText(\"Run Time To Empty\")\r\n        self.runTimeToEmpty1.setStyleSheet(GUI_Style.nameLabel)\r\n        self.runTimeToEmpty1.setWordWrap(True)\r\n        \r\n        # create input box\r\n        self.runTimeToEmptyBox1 = QLineEdit(self)\r\n        self.runTimeToEmptyBox1.setStyleSheet(GUI_Style.updateField)\r\n        self.runTimeToEmptyBox1.setMaximumWidth(50)\r\n\r\n        # create layout\r\n        self.runTimeToEmpty_Layout1 = QHBoxLayout()\r\n        self.runTimeToEmpty_Layout1.addWidget(self.runTimeToEmpty1)\r\n        self.runTimeToEmpty_Layout1.addWidget(self.runTimeToEmptyBox1)\r\n\r\n    def averageTimeToEmpty_1(self):\r\n        # create label\r\n        self.avgTimeToEmpty1 = QLabel(self)\r\n        self.avgTimeToEmpty1.setText(\"Average Time To Empty\")\r\n        self.avgTimeToEmpty1.setStyleSheet(GUI_Style.nameLabel)\r\n        self.avgTimeToEmpty1.setWordWrap(True)\r\n        \r\n        # create input box\r\n        self.avgTimeToEmptyBox1 = QLineEdit(self)\r\n        self.avgTimeToEmptyBox1.setStyleSheet(GUI_Style.updateField)\r\n        self.avgTimeToEmptyBox1.setMaximumWidth(50)\r\n\r\n        # create layout\r\n        self.avgTimeToEmpty_Layout1 = QHBoxLayout()\r\n        self.avgTimeToEmpty_Layout1.addWidget(self.avgTimeToEmpty1)\r\n        self.avgTimeToEmpty_Layout1.addWidget(self.avgTimeToEmptyBox1)\r\n\r\n    def averageTimeToFull_1(self):\r\n        # create label\r\n        self.avgTimeToFull1 = QLabel(self)\r\n        self.avgTimeToFull1.setText(\"Average Time To Full\")\r\n        self.avgTimeToFull1.setStyleSheet(GUI_Style.nameLabel)\r\n        self.avgTimeToFull1.setWordWrap(True)\r\n        \r\n        # create input box\r\n        self.avgTimeToFullBox1 = QLineEdit(self)\r\n        self.avgTimeToFullBox1.setStyleSheet(GUI_Style.updateField)\r\n        self.avgTimeToFullBox1.setMaximumWidth(50)\r\n\r\n        # create layout\r\n        self.avgTimeToFull_Layout1 = QHBoxLayout()\r\n        self.avgTimeToFull_Layout1.addWidget(self.avgTimeToFull1)\r\n        self.avgTimeToFull_Layout1.addWidget(self.avgTimeToFullBox1) \r\n    \r\n    def chargingCurrent_1(self):\r\n        # create label\r\n        self.chargingCurrent1 = QLabel(self)\r\n        self.chargingCurrent1.setText(\"Charging Current\")\r\n        self.chargingCurrent1.setStyleSheet(GUI_Style.nameLabel)\r\n        self.chargingCurrent1.setWordWrap(True)\r\n    
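    # (same label/box/row pattern as above; see the _metric_row sketch)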
\r\n # create input box\r\n self.chargingCurrentBox1 = QLineEdit(self)\r\n self.chargingCurrentBox1.setStyleSheet(GUI_Style.updateField)\r\n self.chargingCurrentBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.charging_Current_Layout1 = QHBoxLayout()\r\n self.charging_Current_Layout1.addWidget(self.chargingCurrent1)\r\n self.charging_Current_Layout1.addWidget(self.chargingCurrentBox1) \r\n\r\n def chargingVoltage_1(self):\r\n # create label\r\n self.chargingVoltage1 = QLabel(self)\r\n self.chargingVoltage1.setText(\"Charging Voltage\")\r\n self.chargingVoltage1.setStyleSheet(GUI_Style.nameLabel)\r\n self.chargingVoltage1.setWordWrap(True)\r\n \r\n # create input box\r\n self.chargingVoltageBox1 = QLineEdit(self)\r\n self.chargingVoltageBox1.setStyleSheet(GUI_Style.updateField)\r\n self.chargingVoltageBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.charging_Voltag_Layout1 = QHBoxLayout()\r\n self.charging_Voltag_Layout1.addWidget(self.chargingVoltage1)\r\n self.charging_Voltag_Layout1.addWidget(self.chargingVoltageBox1)\r\n\r\n def Battery_Status_1(self):\r\n # create label\r\n self.batteryStatus1 = QLabel(self)\r\n self.batteryStatus1.setText(\"Battery Status\")\r\n self.batteryStatus1.setStyleSheet(GUI_Style.nameLabel)\r\n self.batteryStatus1.setWordWrap(True)\r\n \r\n # create input box\r\n self.batteryStatusBox1 = QLineEdit(self)\r\n self.batteryStatusBox1.setStyleSheet(GUI_Style.updateField)\r\n self.batteryStatusBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.batteryStatusLayout1 = QHBoxLayout()\r\n self.batteryStatusLayout1.addWidget(self.batteryStatus1)\r\n self.batteryStatusLayout1.addWidget(self.batteryStatusBox1)\r\n\r\n def Cycle_Count_1(self):\r\n # create label\r\n self.cycleCount1 = QLabel(self)\r\n self.cycleCount1.setText(\"Cycle Count\")\r\n self.cycleCount1.setStyleSheet(GUI_Style.nameLabel)\r\n self.cycleCount1.setWordWrap(True)\r\n \r\n # create input box\r\n self.cycleCountBox1 = QLineEdit(self)\r\n self.cycleCountBox1.setStyleSheet(GUI_Style.updateField)\r\n self.cycleCountBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cycleCountLayout1 = QHBoxLayout()\r\n self.cycleCountLayout1.addWidget(self.cycleCount1)\r\n self.cycleCountLayout1.addWidget(self.cycleCountBox1)\r\n \r\n def Serial_Number_1(self):\r\n # create label\r\n self.serNum1 = QLabel(self)\r\n self.serNum1.setText(\"Serial Number\")\r\n self.serNum1.setStyleSheet(GUI_Style.nameLabel)\r\n self.serNum1.setWordWrap(True)\r\n \r\n # create input box\r\n self.serNumBox1 = QLineEdit(self)\r\n self.serNumBox1.setStyleSheet(GUI_Style.updateField)\r\n self.serNumBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.serialNumLayout1 = QHBoxLayout()\r\n self.serialNumLayout1.addWidget(self.serNum1)\r\n self.serialNumLayout1.addWidget(self.serNumBox1)\r\n \r\n def Device_Name_1(self):\r\n # create label\r\n self.devName1 = QLabel(self)\r\n self.devName1.setText(\"Device Name\")\r\n self.devName1.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.devNameBox1 = QLineEdit(self)\r\n self.devNameBox1.setStyleSheet(GUI_Style.updateField)\r\n self.devNameBox1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.deviceNameLayout1 = QHBoxLayout()\r\n self.deviceNameLayout1.addWidget(self.devName1)\r\n self.deviceNameLayout1.addWidget(self.devNameBox1)\r\n \r\n def Cell_Voltage4_1(self):\r\n # create label\r\n self.cellVolt4_1 = QLabel(self)\r\n self.cellVolt4_1.setText(\"Cell Voltage 4\")\r\n self.cellVolt4_1.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create 
input box\r\n self.cellVolt4Box1 = QLineEdit(self)\r\n self.cellVolt4Box1.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt4Box1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage4_Layout1 = QHBoxLayout()\r\n self.cellVoltage4_Layout1.addWidget(self.cellVolt4_1)\r\n self.cellVoltage4_Layout1.addWidget(self.cellVolt4Box1)\r\n \r\n def Cell_Voltage3_1(self):\r\n # create label\r\n self.cellVolt3_1 = QLabel(self)\r\n self.cellVolt3_1.setText(\"Cell Voltage 3\")\r\n self.cellVolt3_1.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt3Box1 = QLineEdit(self)\r\n self.cellVolt3Box1.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt3Box1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage3_Layout1 = QHBoxLayout()\r\n self.cellVoltage3_Layout1.addWidget(self.cellVolt3_1)\r\n self.cellVoltage3_Layout1.addWidget(self.cellVolt3Box1)\r\n \r\n def Cell_Voltage2_1(self):\r\n # create label\r\n self.cellVolt2_1 = QLabel(self)\r\n self.cellVolt2_1.setText(\"Cell Voltage 2\")\r\n self.cellVolt2_1.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt2Box1 = QLineEdit(self)\r\n self.cellVolt2Box1.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt2Box1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage2_Layout1 = QHBoxLayout()\r\n self.cellVoltage2_Layout1.addWidget(self.cellVolt2_1)\r\n self.cellVoltage2_Layout1.addWidget(self.cellVolt2Box1)\r\n \r\n def Cell_Voltage1_1(self):\r\n # create label\r\n self.cellVolt1_1 = QLabel(self)\r\n self.cellVolt1_1.setText(\"Cell Voltage 1\")\r\n self.cellVolt1_1.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt1Box1 = QLineEdit(self)\r\n self.cellVolt1Box1.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt1Box1.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage1_Layout1 = QHBoxLayout()\r\n self.cellVoltage1_Layout1.addWidget(self.cellVolt1_1)\r\n self.cellVoltage1_Layout1.addWidget(self.cellVolt1Box1)\r\n \r\n ## ------------------------------------------------------------------\r\n ## ----------- Create Battery 2 Tab GUI Objects --------------------\r\n ## ------------------------------------------------------------------\r\n def remainingCapaAlarm_2(self):\r\n # create label\r\n self.remCapAlarm2 = QLabel(self)\r\n self.remCapAlarm2.setText(\"Remaining Capacity Alarm\")\r\n self.remCapAlarm2.setStyleSheet(GUI_Style.nameLabel)\r\n self.remCapAlarm2.setWordWrap(True)\r\n \r\n # create input box\r\n self.remCapAlarmBox2 = QLineEdit(self)\r\n self.remCapAlarmBox2.setStyleSheet(GUI_Style.updateField)\r\n self.remCapAlarmBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.remCapAlarm_Layout2 = QHBoxLayout()\r\n self.remCapAlarm_Layout2.addWidget(self.remCapAlarm2)\r\n self.remCapAlarm_Layout2.addWidget(self.remCapAlarmBox2)\r\n\r\n def batteryMode_2(self):\r\n # create label\r\n self.battMode2 = QLabel(self)\r\n self.battMode2.setText(\"Battery Mode\")\r\n self.battMode2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.battModeBox2 = QLineEdit(self)\r\n self.battModeBox2.setStyleSheet(GUI_Style.updateField)\r\n self.battModeBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.battMode_Layout2 = QHBoxLayout()\r\n self.battMode_Layout2.addWidget(self.battMode2)\r\n self.battMode_Layout2.addWidget(self.battModeBox2)\r\n\r\n def voltage_2(self):\r\n # create label\r\n self.voltage2 = QLabel(self)\r\n self.voltage2.setText(\"Voltage\")\r\n self.voltage2.setStyleSheet(GUI_Style.nameLabel)\r\n 
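    # (Battery 2 widgets mirror the Battery 1 tab; method names end in _2)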
\r\n # create input box\r\n self.voltageBox2 = QLineEdit(self)\r\n self.voltageBox2.setStyleSheet(GUI_Style.updateField)\r\n self.voltageBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.voltage_Layout2 = QHBoxLayout()\r\n self.voltage_Layout2.addWidget(self.voltage2)\r\n self.voltage_Layout2.addWidget(self.voltageBox2)\r\n\r\n def current_2(self):\r\n # create label\r\n self.current2 = QLabel(self)\r\n self.current2.setText(\"Current\")\r\n self.current2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.currentBox2 = QLineEdit(self)\r\n self.currentBox2.setStyleSheet(GUI_Style.updateField)\r\n self.currentBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.current_Layout2 = QHBoxLayout()\r\n self.current_Layout2.addWidget(self.current2)\r\n self.current_Layout2.addWidget(self.currentBox2)\r\n\r\n def averageCurrent_2(self):\r\n # create label\r\n self.avgCurr2 = QLabel(self)\r\n self.avgCurr2.setText(\"Average Current\")\r\n self.avgCurr2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.avgCurrBox2 = QLineEdit(self)\r\n self.avgCurrBox2.setStyleSheet(GUI_Style.updateField)\r\n self.avgCurrBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.avgCurr_Layout2 = QHBoxLayout()\r\n self.avgCurr_Layout2.addWidget(self.avgCurr2)\r\n self.avgCurr_Layout2.addWidget(self.avgCurrBox2)\r\n\r\n def relativeStatOfCharge_2(self):\r\n # create label\r\n self.relStateCharge2 = QLabel(self)\r\n self.relStateCharge2.setText(\"Relative State Of Charge\")\r\n self.relStateCharge2.setStyleSheet(GUI_Style.nameLabel)\r\n self.relStateCharge2.setWordWrap(True)\r\n \r\n # create input box\r\n self.relStateChargeBox2 = QLineEdit(self)\r\n self.relStateChargeBox2.setStyleSheet(GUI_Style.updateField)\r\n self.relStateChargeBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.relStateCharge_Layout2 = QHBoxLayout()\r\n self.relStateCharge_Layout2.addWidget(self.relStateCharge2)\r\n self.relStateCharge_Layout2.addWidget(self.relStateChargeBox2)\r\n\r\n def absoluteStatOfCharge_2(self):\r\n # create label\r\n self.absStateCharge2 = QLabel(self)\r\n self.absStateCharge2.setText(\"Absolute State Of Charge\")\r\n self.absStateCharge2.setStyleSheet(GUI_Style.nameLabel)\r\n self.absStateCharge2.setWordWrap(True)\r\n \r\n # create input box\r\n self.absStateChargeBox2 = QLineEdit(self)\r\n self.absStateChargeBox2.setStyleSheet(GUI_Style.updateField)\r\n self.absStateChargeBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.absStateCharge_Layout2 = QHBoxLayout()\r\n self.absStateCharge_Layout2.addWidget(self.absStateCharge2)\r\n self.absStateCharge_Layout2.addWidget(self.absStateChargeBox2)\r\n\r\n def remainingCapacity_2(self):\r\n # create label\r\n self.remainingCapacity2 = QLabel(self)\r\n self.remainingCapacity2.setText(\"Remaining Capacity\")\r\n self.remainingCapacity2.setStyleSheet(GUI_Style.nameLabel)\r\n self.remainingCapacity2.setWordWrap(True)\r\n \r\n # create input box\r\n self.remainingCapacityBox2 = QLineEdit(self)\r\n self.remainingCapacityBox2.setStyleSheet(GUI_Style.updateField)\r\n self.remainingCapacityBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.remCap_Layout2 = QHBoxLayout()\r\n self.remCap_Layout2.addWidget(self.remainingCapacity2)\r\n self.remCap_Layout2.addWidget(self.remainingCapacityBox2)\r\n\r\n def fullChargeCapacity_2(self):\r\n # create label\r\n self.fullChargeCapacity2 = QLabel(self)\r\n self.fullChargeCapacity2.setText(\"Full Charge Capacity\")\r\n 
self.fullChargeCapacity2.setStyleSheet(GUI_Style.nameLabel)\r\n self.fullChargeCapacity2.setWordWrap(True)\r\n \r\n # create input box\r\n self.fullChargeCapacityBox2 = QLineEdit(self)\r\n self.fullChargeCapacityBox2.setStyleSheet(GUI_Style.updateField)\r\n self.fullChargeCapacityBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.fullCharge_Layout2 = QHBoxLayout()\r\n self.fullCharge_Layout2.addWidget(self.fullChargeCapacity2)\r\n self.fullCharge_Layout2.addWidget(self.fullChargeCapacityBox2)\r\n\r\n def runTimeToEmpty_2(self):\r\n # create label\r\n self.runTimeToEmpty2 = QLabel(self)\r\n self.runTimeToEmpty2.setText(\"Run Time To Empty\")\r\n self.runTimeToEmpty2.setStyleSheet(GUI_Style.nameLabel)\r\n self.runTimeToEmpty2.setWordWrap(True)\r\n \r\n # create input box\r\n self.runTimeToEmptyBox2 = QLineEdit(self)\r\n self.runTimeToEmptyBox2.setStyleSheet(GUI_Style.updateField)\r\n self.runTimeToEmptyBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.runTimeToEmpty_Layout2 = QHBoxLayout()\r\n self.runTimeToEmpty_Layout2.addWidget(self.runTimeToEmpty2)\r\n self.runTimeToEmpty_Layout2.addWidget(self.runTimeToEmptyBox2)\r\n\r\n def averageTimeToEmpty_2(self):\r\n # create label\r\n self.avgTimeToEmpty2 = QLabel(self)\r\n self.avgTimeToEmpty2.setText(\"Average Time To Empty\")\r\n self.avgTimeToEmpty2.setStyleSheet(GUI_Style.nameLabel)\r\n self.avgTimeToEmpty2.setWordWrap(True)\r\n \r\n # create input box\r\n self.avgTimeToEmptyBox2 = QLineEdit(self)\r\n self.avgTimeToEmptyBox2.setStyleSheet(GUI_Style.updateField)\r\n self.avgTimeToEmptyBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.avgTimeToEmpty_Layout2 = QHBoxLayout()\r\n self.avgTimeToEmpty_Layout2.addWidget(self.avgTimeToEmpty2)\r\n self.avgTimeToEmpty_Layout2.addWidget(self.avgTimeToEmptyBox2)\r\n\r\n def averageTimeToFull_2(self):\r\n # create label\r\n self.avgTimeToFull2 = QLabel(self)\r\n self.avgTimeToFull2.setText(\"Average Time To Full\")\r\n self.avgTimeToFull2.setStyleSheet(GUI_Style.nameLabel)\r\n self.avgTimeToFull2.setWordWrap(True)\r\n \r\n # create input box\r\n self.avgTimeToFullBox2 = QLineEdit(self)\r\n self.avgTimeToFullBox2.setStyleSheet(GUI_Style.updateField)\r\n self.avgTimeToFullBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.avgTimeToFull_Layout2 = QHBoxLayout()\r\n self.avgTimeToFull_Layout2.addWidget(self.avgTimeToFull2)\r\n self.avgTimeToFull_Layout2.addWidget(self.avgTimeToFullBox2) \r\n \r\n def chargingCurrent_2(self):\r\n # create label\r\n self.chargingCurrent2 = QLabel(self)\r\n self.chargingCurrent2.setText(\"Charging Current\")\r\n self.chargingCurrent2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.chargingCurrentBox2 = QLineEdit(self)\r\n self.chargingCurrentBox2.setStyleSheet(GUI_Style.updateField)\r\n self.chargingCurrentBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.charging_Current_Layout2 = QHBoxLayout()\r\n self.charging_Current_Layout2.addWidget(self.chargingCurrent2)\r\n self.charging_Current_Layout2.addWidget(self.chargingCurrentBox2) \r\n\r\n def chargingVoltage_2(self):\r\n # create label\r\n self.chargingVoltage2 = QLabel(self)\r\n self.chargingVoltage2.setText(\"Charging Voltage\")\r\n self.chargingVoltage2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.chargingVoltageBox2 = QLineEdit(self)\r\n self.chargingVoltageBox2.setStyleSheet(GUI_Style.updateField)\r\n self.chargingVoltageBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.charging_Voltag_Layout2 = QHBoxLayout()\r\n 
self.charging_Voltag_Layout2.addWidget(self.chargingVoltage2)\r\n self.charging_Voltag_Layout2.addWidget(self.chargingVoltageBox2)\r\n\r\n def Battery_Status_2(self):\r\n # create label\r\n self.batteryStatus2 = QLabel(self)\r\n self.batteryStatus2.setText(\"Battery Status\")\r\n self.batteryStatus2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.batteryStatusBox2 = QLineEdit(self)\r\n self.batteryStatusBox2.setStyleSheet(GUI_Style.updateField)\r\n self.batteryStatusBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.batteryStatusLayout2 = QHBoxLayout()\r\n self.batteryStatusLayout2.addWidget(self.batteryStatus2)\r\n self.batteryStatusLayout2.addWidget(self.batteryStatusBox2)\r\n\r\n def Cycle_Count_2(self):\r\n # create label\r\n self.cycleCount2 = QLabel(self)\r\n self.cycleCount2.setText(\"Cycle Count\")\r\n self.cycleCount2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cycleCountBox2 = QLineEdit(self)\r\n self.cycleCountBox2.setStyleSheet(GUI_Style.updateField)\r\n self.cycleCountBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cycleCountLayout2 = QHBoxLayout()\r\n self.cycleCountLayout2.addWidget(self.cycleCount2)\r\n self.cycleCountLayout2.addWidget(self.cycleCountBox2)\r\n \r\n def Serial_Number_2(self):\r\n # create label\r\n self.serNum2 = QLabel(self)\r\n self.serNum2.setText(\"Serial Number\")\r\n self.serNum2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.serNumBox2 = QLineEdit(self)\r\n self.serNumBox2.setStyleSheet(GUI_Style.updateField)\r\n self.serNumBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.serialNumLayout2 = QHBoxLayout()\r\n self.serialNumLayout2.addWidget(self.serNum2)\r\n self.serialNumLayout2.addWidget(self.serNumBox2)\r\n \r\n def Device_Name_2(self):\r\n # create label\r\n self.devName2 = QLabel(self)\r\n self.devName2.setText(\"Device Name\")\r\n self.devName2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.devNameBox2 = QLineEdit(self)\r\n self.devNameBox2.setStyleSheet(GUI_Style.updateField)\r\n self.devNameBox2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.deviceNameLayout2 = QHBoxLayout()\r\n self.deviceNameLayout2.addWidget(self.devName2)\r\n self.deviceNameLayout2.addWidget(self.devNameBox2)\r\n \r\n def Cell_Voltage4_2(self):\r\n # create label\r\n self.cellVolt4_2 = QLabel(self)\r\n self.cellVolt4_2.setText(\"Cell Voltage 4\")\r\n self.cellVolt4_2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt4Box2 = QLineEdit(self)\r\n self.cellVolt4Box2.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt4Box2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage4_Layout2 = QHBoxLayout()\r\n self.cellVoltage4_Layout2.addWidget(self.cellVolt4_2)\r\n self.cellVoltage4_Layout2.addWidget(self.cellVolt4Box2)\r\n \r\n def Cell_Voltage3_2(self):\r\n # create label\r\n self.cellVolt3_2 = QLabel(self)\r\n self.cellVolt3_2.setText(\"Cell Voltage 3\")\r\n self.cellVolt3_2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt3Box2 = QLineEdit(self)\r\n self.cellVolt3Box2.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt3Box2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage3_Layout2 = QHBoxLayout()\r\n self.cellVoltage3_Layout2.addWidget(self.cellVolt3_2)\r\n self.cellVoltage3_Layout2.addWidget(self.cellVolt3Box2)\r\n \r\n def Cell_Voltage2_2(self):\r\n # create label\r\n self.cellVolt2_2 = QLabel(self)\r\n self.cellVolt2_2.setText(\"Cell Voltage 2\")\r\n 
self.cellVolt2_2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt2Box2 = QLineEdit(self)\r\n self.cellVolt2Box2.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt2Box2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage2_Layout2 = QHBoxLayout()\r\n self.cellVoltage2_Layout2.addWidget(self.cellVolt2_2)\r\n self.cellVoltage2_Layout2.addWidget(self.cellVolt2Box2)\r\n \r\n def Cell_Voltage1_2(self):\r\n # create label\r\n self.cellVolt1_2 = QLabel(self)\r\n self.cellVolt1_2.setText(\"Cell Voltage 1\")\r\n self.cellVolt1_2.setStyleSheet(GUI_Style.nameLabel)\r\n \r\n # create input box\r\n self.cellVolt1Box2 = QLineEdit(self)\r\n self.cellVolt1Box2.setStyleSheet(GUI_Style.updateField)\r\n self.cellVolt1Box2.setMaximumWidth(50)\r\n\r\n # create layout\r\n self.cellVoltage1_Layout2 = QHBoxLayout()\r\n self.cellVoltage1_Layout2.addWidget(self.cellVolt1_2)\r\n self.cellVoltage1_Layout2.addWidget(self.cellVolt1Box2)\r\n\r\n # ------------------------------------------------------------------\r\n # ---------------- Create Status Bar Widgets -----------------------\r\n # ------------------------------------------------------------------\r\n def StatusBar(self):\r\n self.statusBar = QStatusBar()\r\n self.statusBar.setStyleSheet(GUI_Style.statusBarWhite)\r\n \r\n self.relay1_40 = Relay_1_40_Ohm(self, \"40\\u03A9 Relay 1\", self.GPIO_ch1)\r\n self.relay1_40.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay1_40.pressed.connect(self.relay1_40.On_Click)\r\n self.relay1_40.released.connect(self.relay1_40.Un_Click)\r\n \r\n self.relay1_60 = Relay_1_60_Ohm(self, \"60\\u03A9 Relay 1\", self.GPIO_ch1)\r\n self.relay1_60.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay1_60.pressed.connect(self.relay1_60.On_Click)\r\n self.relay1_60.released.connect(self.relay1_60.Un_Click)\r\n \r\n self.relay1_500 = Relay_1_500_Ohm(self, \"500\\u03A9 Relay 1\", self.GPIO_ch1)\r\n self.relay1_500.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay1_500.pressed.connect(self.relay1_500.On_Click)\r\n self.relay1_500.released.connect(self.relay1_500.Un_Click)\r\n \r\n self.relay1_1k = Relay_1_1k_Ohm(self, \"1k\\u03A9 Relay 1\", self.GPIO_ch1)\r\n self.relay1_1k.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay1_1k.pressed.connect(self.relay1_1k.On_Click)\r\n self.relay1_1k.released.connect(self.relay1_1k.Un_Click)\r\n \r\n self.relay2_40 = Relay_2_40_Ohm(self, \"40\\u03A9 Relay 2\", self.GPIO_ch2)\r\n self.relay2_40.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay2_40.pressed.connect(self.relay2_40.On_Click)\r\n self.relay2_40.released.connect(self.relay2_40.Un_Click)\r\n \r\n self.relay2_60 = Relay_2_60_Ohm(self, \"60\\u03A9 Relay 2\", self.GPIO_ch2)\r\n self.relay2_60.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay2_60.pressed.connect(self.relay2_60.On_Click)\r\n self.relay2_60.released.connect(self.relay2_60.Un_Click)\r\n \r\n self.relay2_500 = Relay_2_500_Ohm(self, \"500\\u03A9 Relay 2\", self.GPIO_ch2)\r\n self.relay2_500.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay2_500.pressed.connect(self.relay2_500.On_Click)\r\n self.relay2_500.released.connect(self.relay2_500.Un_Click)\r\n \r\n self.relay2_1k = Relay_2_1k_Ohm(self, \"1k\\u03A9 Relay 2\", self.GPIO_ch2)\r\n self.relay2_1k.setStyleSheet(GUI_Style.buttonPressed)\r\n self.relay2_1k.pressed.connect(self.relay2_1k.On_Click)\r\n self.relay2_1k.released.connect(self.relay2_1k.Un_Click)\r\n\r\n self.statusBar.addPermanentWidget(self.relay1_40, 0)\r\n self.statusBar.addPermanentWidget(self.relay1_60, 
0)\r\n        self.statusBar.addPermanentWidget(self.relay1_500, 0)\r\n        self.statusBar.addPermanentWidget(self.relay1_1k, 0)\r\n        self.statusBar.addPermanentWidget(self.relay2_40, 0)\r\n        self.statusBar.addPermanentWidget(self.relay2_60, 0)\r\n        self.statusBar.addPermanentWidget(self.relay2_500, 0)\r\n        self.statusBar.addPermanentWidget(self.relay2_1k, 0)\r\n\r\n        self.statusBar.showMessage(\"Starting Up... \", 4000)\r\n\r\n    # ------------------------------------------------------------------\r\n    # ----------- Close All Threads at app closure ---------------------\r\n    # ------------------------------------------------------------------\r\n    # Stop all threads when GUI is closed\r\n    def closeEvent(self, *args, **kwargs):\r\n        self.GPIO_ch1.Set_Exit_Program(True)\r\n        self.GPIO_ch1.wait(100)\r\n        self.GPIO_ch2.Set_Exit_Program(True)\r\n        self.GPIO_ch2.wait(100)\r\n        GPIO.cleanup()  # release the pins only after both GPIO threads have stopped\r\n    \r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# -------------------- MAIN LOOP ---------------------------------------\r\n# ----------------------------------------------------------------------\r\ndef run():\r\n    # Run the application\r\n    app = QApplication(sys.argv)\r\n    # Create GUI\r\n    GUI = Window()\r\n    # Exit\r\n    sys.exit(app.exec())\r\n\r\n\r\n# Main code\r\nif __name__ == \"__main__\":\r\n    run()\r\n\r\n", "sub_path": "Battery_TF_Main.py", "file_name": "Battery_TF_Main.py", "file_ext": "py", "file_size_in_byte": 51172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.getcwd", "line_number": 25, "usage_type": "call"}, {"api_name": "GUI_Stylesheets.GUI_Stylesheets", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 55, "usage_type": "call"}, {"api_name": "RPi.GPIO.setmode", "line_number": 61, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 61, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 61, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 62, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 62, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 62, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 63, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 63, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 64, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 64, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 65, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 65, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 65, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 66, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 66, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 67, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 67, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 68, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 68, "usage_type": "name"}, {"api_name": 
"RPi.GPIO.OUT", "line_number": 68, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 69, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 69, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 69, "usage_type": "attribute"}, {"api_name": "GPIO_thread1.GPIO_Ch1_Thread", "line_number": 74, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 74, "usage_type": "argument"}, {"api_name": "GPIO_thread2.GPIO_Ch2_Thread", "line_number": 75, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 75, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 89, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignRight", "line_number": 116, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 116, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 117, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 117, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 122, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 140, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 174, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 188, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 202, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 238, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 252, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 266, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Key_Return", "line_number": 302, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 302, "usage_type": "name"}, {"api_name": "buttonHandler.handlers", "line_number": 308, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 316, "usage_type": "call"}, {"api_name": "GUI_Buttons.Logo_Button", "line_number": 321, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 325, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 326, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 331, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignTop", "line_number": 337, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 337, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 342, "usage_type": "call"}, {"api_name": "GUI_Buttons.Send_Command_Button", "line_number": 349, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 359, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 364, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 365, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 375, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 381, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 386, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 393, "usage_type": "call"}, 
{"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 399, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 404, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 410, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 416, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 421, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 427, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 433, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 438, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 444, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 450, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 455, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 461, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 467, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 472, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 478, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 484, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 489, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 495, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 501, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 506, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 512, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 518, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 523, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 529, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 535, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 540, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 546, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 552, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 557, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 563, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 570, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 575, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 581, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 587, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 592, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 598, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 604, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 609, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 615, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 621, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 626, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 632, "usage_type": 
"call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 638, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 643, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 649, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 655, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 660, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 666, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 671, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 676, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 682, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 687, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 692, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 698, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 703, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 708, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 714, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 719, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 724, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 730, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 735, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 740, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 749, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 755, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 760, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 766, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 771, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 776, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 782, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 787, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 792, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 798, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 803, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 808, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 814, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 819, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 824, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 830, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 836, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 841, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 847, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 853, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 858, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 864, 
"usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 870, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 875, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 881, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 887, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 892, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 898, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 904, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 909, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 915, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 921, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 926, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 932, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 938, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 943, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 949, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 954, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 959, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 965, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 970, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 975, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 981, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 986, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 991, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 997, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1002, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1007, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 1013, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1018, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1023, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 1029, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1034, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1039, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 1045, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1050, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1055, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 1061, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1066, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1071, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 1077, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1082, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1087, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", 
"line_number": 1093, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1098, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1103, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QStatusBar", "line_number": 1111, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_1_40_Ohm", "line_number": 1114, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_1_60_Ohm", "line_number": 1119, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_1_500_Ohm", "line_number": 1124, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_1_1k_Ohm", "line_number": 1129, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_2_40_Ohm", "line_number": 1134, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_2_60_Ohm", "line_number": 1139, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_2_500_Ohm", "line_number": 1144, "usage_type": "call"}, {"api_name": "GUI_Buttons.Relay_2_1k_Ohm", "line_number": 1149, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 1170, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 1170, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1183, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1183, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1187, "usage_type": "call"}]} +{"seq_id": "373676421", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nset of functions to drive EasyQuake\n\"\"\"\n#import sys\n#sys.path.append(\"/home/jwalter/syncpython\")\nfrom .phasepapy import fbpicker\npathgpd = '/'.join(str(fbpicker.__file__).split(\"/\")[:-2])+'/gpd_predict'\n\nfrom .phasepapy import tables1D, assoc1D\nfrom .phasepapy import tt_stations_1D\nfrom obspy import UTCDateTime\nfrom obspy import Inventory, read_inventory\nfrom obspy.clients.fdsn import Client\nfrom obspy import read\nimport datetime\nimport numpy as np\nimport glob\nimport sys\nimport os\nimport stat\nst = os.stat(pathgpd+'/gpd_predict.py')\nos.chmod(pathgpd+'/gpd_predict.py', st.st_mode | stat.S_IEXEC)\nimport obspy.taup as taup\nfrom obspy import geodetics\nfrom obspy.clients.fdsn.mass_downloader import CircularDomain, RectangularDomain, Restrictions, MassDownloader\nfrom obspy.core.event.base import WaveformStreamID\nfrom obspy.core.event import ResourceIdentifier\nfrom sqlalchemy.orm import *\nfrom sqlalchemy import create_engine\nimport numpy as np\nimport obspy.core as oc\nimport pandas as pd\nimport sqlite3\nfrom sqlite3 import Error\nfrom obspy.geodetics import gps2dist_azimuth\n\nimport pylab as plt\nimport re\nfrom datetime import datetime\n#from mpl_toolkits.basemap import Basemap\n\n\nfrom sqlalchemy import create_engine\nfrom obspy import Stream\nfrom obspy.core.event import Catalog, Event, Magnitude, Origin, Pick, StationMagnitude, Amplitude, Arrival\nfrom obspy.signal.invsim import simulate_seismometer as seis_sim\nfmtP = \"%4s%1sP%1s%1i %15s\"\nfmtS = \"%12s%1sS%1s%1i\\n\"\n\n\n\nfmt = \"%6s%02i%05.2f%1s%03i%05.2f%1s%4i\\n\"\n\n\n#min_proba = 0.993 # Minimum softmax probability for phase detection\n## try 0.992 if you have the computing power\n#freq_min = 3.0\n#freq_max = 20.0\n#filter_data = True\n#decimate_data = True # If false, assumes data is already 100 Hz samprate\n#n_shift = 10 # Number of samples to shift the sliding window at a time\n#n_gpu = 1 # Number of GPUs to use (if any)\n######################\n#batch_size = 1000*3\n#\n#half_dur = 2.00\n#only_dt = 0.01\n#n_win = int(half_dur/only_dt)\n#n_feat = 
2*n_win\n\n\nfrom datetime import timedelta, date\n\ndef daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\nclass SCNL():\n \"\"\" This class is copied from PhasePaPy\"\"\"\n def __init__(self,input=None):\n if not isinstance(input, SCNL):\n self.station=None\n self.channel=None\n self.network=None\n self.location=None\n if type(input) is str:\n self.parse_scnlstr(input)\n if type(input) is list:\n if len(input)==4:\n self.station,self.channel,self.network,self.location=input\n if len(input)==3:\n self.station,self.channel,self.network=input\n \n\ndef download_mseed(dirname=None, project_folder=None, single_date=None, minlat=None, maxlat=None, minlon=None, maxlon=None):\n starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0')\n stopping = starting + 86430\n starttime = starting\n endtime = stopping\n #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)\n domain = RectangularDomain(minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon)\n #domain = RectangularDomain(minlatitude=-90, maxlatitude=-60,minlongitude=-180, maxlongitude=180)\n restrictions = Restrictions(starttime=starttime, endtime=endtime,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=5000, channel_priorities=[\"HH[ZNE12]\", \"BH[ZNE12]\",\"EH[ZNE12]\",\"SH[ZNE12]\",\"HN[ZNE12]\",\"EN[ZNE12]\"])\n mseed1 = project_folder+'/'+dirname\n if not os.path.exists(mseed1):\n os.makedirs(mseed1) #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)\n #original1 = project_folder+'/*.[BH]??__'+dirname+'*'\n #os.system(\"mv %s %s\" % (original1,mseed1))\n mdl = MassDownloader()\n mdl.download(domain, restrictions, threads_per_client=4, mseed_storage=mseed1,stationxml_storage=mseed1)\n\ndef download_mseed_event(dirname=None, project_folder=None, starting=None, stopping = None, minlat=None, maxlat=None, minlon=None, maxlon=None, maxrad=None):\n starttime = starting\n endtime = stopping\n #domain = CircularDomain(lat1,lon1,minradius=0.0, maxradius=maxrad)\n domain = RectangularDomain(minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon)\n #domain = RectangularDomain(minlatitude=-90, maxlatitude=-60,minlongitude=-180, maxlongitude=180)\n restrictions = Restrictions(starttime=starttime, endtime=endtime,chunklength_in_sec=86400,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=5000, channel_priorities=[\"HH[ZNE12]\", \"BH[ZNE12]\",\"EH[ZNE12]\",\"SH[ZNE12]\",\"HN[ZNE12]\",\"EN[ZNE12]\"])\n mseed1 = project_folder+'/'+dirname\n if not os.path.exists(mseed1):\n os.makedirs(mseed1) #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)\n #original1 = project_folder+'/*.[BH]??__'+dirname+'*'\n #os.system(\"mv %s %s\" % (original1,mseed1))\n mdl = MassDownloader()\n mdl.download(domain, restrictions, threads_per_client=4, mseed_storage=mseed1,stationxml_storage=mseed1)\n \ndef download_mseed_event_radial(dirname=None, project_folder=None, starting=None, stopping = None, lat1=None, lon1=None, maxrad=None):\n starttime = starting\n endtime = stopping\n domain = CircularDomain(lat1,lon1,minradius=0.0, maxradius=maxrad)\n #domain = RectangularDomain(minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon)\n #domain = RectangularDomain(minlatitude=-90, maxlatitude=-60,minlongitude=-180, maxlongitude=180)\n restrictions = 
Restrictions(starttime=starttime, endtime=endtime,chunklength_in_sec=86400,reject_channels_with_gaps=False,minimum_length=0,minimum_interstation_distance_in_m=1000, channel_priorities=[\"HH[ZNE12]\", \"BH[ZNE12]\",\"EH[ZNE12]\",\"SH[ZNE12]\",\"HN[ZNE12]\",\"EN[ZNE12]\"])\n mseed1 = project_folder+'/'+dirname\n if not os.path.exists(mseed1):\n os.makedirs(mseed1) #domain = CircularDomain(-90,0,minradius=0.0, maxradius=30.0)\n #original1 = project_folder+'/*.[BH]??__'+dirname+'*'\n #os.system(\"mv %s %s\" % (original1,mseed1))\n mdl = MassDownloader()\n mdl.download(domain, restrictions, threads_per_client=4, mseed_storage=mseed1,stationxml_storage=mseed1)\n\ndef process_local_sac():\n print('Local sac files')\n\n\n \ndef build_tt_tables(lat1=None,long1=None,maxrad=None,starting=None, stopping=None, channel_codes=['EH','BH','HH','HN'],db=None,maxdist=500.,source_depth=5.):\n \"\"\" \n \"\"\"\n # Create a connection to an sqlalchemy database\n tt_engine=create_engine(db,echo=False)\n tt_stations_1D.BaseTT1D.metadata.create_all(tt_engine)\n TTSession=sessionmaker(bind=tt_engine)\n tt_session=TTSession()\n fdsnclient=Client()\n inv=fdsnclient.get_stations(starttime=starting,endtime=stopping,latitude=lat1,longitude=long1,maxradius=maxrad,channel='*HZ',level='channel')\n # Get inventory\n for net in inv:\n network=net.code\n for sta in net:\n loccodes=[]\n for ch in sta:\n for cc in channel_codes:\n if re.match(cc,ch.code):\n if not ch.location_code in loccodes:\n loccodes.append(ch.location_code)\n for loc in loccodes:\n print(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)\n station=tt_stations_1D.Station1D(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)\n tt_session.add(station)\n tt_session.commit()\n\n # Now we have to build our traveltime lookup tables\n # We will use IASP91 here but obspy.taup does let you build your own model\n velmod=taup.TauPyModel(model='iasp91')\n delta_distance=1. 
# km for spacing tt calculations \n distance_km=np.arange(0,maxdist+delta_distance,delta_distance)\n for d_km in distance_km:\n d_deg=geodetics.kilometer2degrees(d_km)\n ptimes=[]\n stimes=[]\n p_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,\n distance_in_degree=d_deg,phase_list=['P','p'])\n for p in p_arrivals:\n ptimes.append(p.time)\n s_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,\n distance_in_degree=d_deg,phase_list=['S','s'])\n for s in s_arrivals:\n stimes.append(s.time)\n tt_entry=tt_stations_1D.TTtable1D(d_km,d_deg,np.min(ptimes),np.min(stimes),np.min(stimes)-np.min(ptimes))\n tt_session.add(tt_entry)\n tt_session.commit() # Probably faster to do the commit outside of loop but oh well\n tt_session.close()\n return inv\n\ndef build_tt_tables_local_directory(dirname=None,project_folder=None,channel_codes=['EH','BH','HH','HN'],db=None,maxdist=800.,source_depth=5.):\n \"\"\" \n \"\"\"\n # Create a connection to an sqlalchemy database\n tt_engine=create_engine(db,echo=False)\n tt_stations_1D.BaseTT1D.metadata.create_all(tt_engine)\n TTSession=sessionmaker(bind=tt_engine)\n tt_session=TTSession()\n inv = Inventory()\n dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')\n for file1 in dir1a:\n inv1a = read_inventory(file1)\n inv.networks.extend(inv1a)\n for net in inv:\n network=net.code\n for sta in net:\n loccodes=[]\n for ch in sta:\n for cc in channel_codes:\n if re.match(cc,ch.code):\n if not ch.location_code in loccodes:\n loccodes.append(ch.location_code)\n for loc in loccodes:\n print(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)\n station=tt_stations_1D.Station1D(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)\n tt_session.add(station)\n tt_session.commit()\n\n # Now we have to build our traveltime lookup tables\n # We will use IASP91 here but obspy.taup does let you build your own model\n velmod=taup.TauPyModel(model='iasp91')\n delta_distance=1. 
# km for spacing tt calculations \n distance_km=np.arange(0,maxdist+delta_distance,delta_distance)\n for d_km in distance_km:\n d_deg=geodetics.kilometer2degrees(d_km)\n ptimes=[]\n stimes=[]\n p_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,\n distance_in_degree=d_deg,phase_list=['P','p'])\n for p in p_arrivals:\n ptimes.append(p.time)\n s_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,\n distance_in_degree=d_deg,phase_list=['S','s'])\n for s in s_arrivals:\n stimes.append(s.time)\n tt_entry=tt_stations_1D.TTtable1D(d_km,d_deg,np.min(ptimes),np.min(stimes),np.min(stimes)-np.min(ptimes))\n tt_session.add(tt_entry)\n tt_session.commit() # Probably faster to do the commit outside of loop but oh well\n tt_session.close()\n return inv\n\n\n \ndef build_tt_tables_local_directory_ant(dirname=None,project_folder=None,channel_codes=['EH','BH','HH'],db=None,maxdist=800.,source_depth=5.):\n \"\"\" \n \"\"\"\n # Create a connection to an sqlalchemy database\n tt_engine=create_engine(db,echo=False)\n tt_stations_1D.BaseTT1D.metadata.create_all(tt_engine)\n TTSession=sessionmaker(bind=tt_engine)\n tt_session=TTSession()\n inv = Inventory()\n dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')\n m = Basemap(projection='spstere',boundinglat=-60,lon_0=180,resolution='i')\n \n for file1 in dir1a:\n inv1a = read_inventory(file1)\n inv.networks.extend(inv1a)\n for net in inv:\n network=net.code\n for sta in net:\n loccodes=[]\n for ch in sta:\n for cc in channel_codes:\n if re.match(cc,ch.code):\n if not ch.location_code in loccodes:\n loccodes.append(ch.location_code)\n for loc in loccodes:\n print(sta.code,network,loc,sta.latitude,sta.longitude,sta.elevation)\n x,y = m(sta.longitude,sta.latitude)\n\n station=tt_stations_1D.Station1D(sta.code,network,loc,y,x,sta.elevation)\n tt_session.add(station)\n tt_session.commit()\n\n # Now we have to build our traveltime lookup tables\n # We will use IASP91 here but obspy.taup does let you build your own model\n velmod=taup.TauPyModel(model='iasp91')\n delta_distance=1. 
# km for spacing tt calculations \n distance_km=np.arange(0,maxdist+delta_distance,delta_distance)\n for d_km in distance_km:\n d_deg=geodetics.kilometer2degrees(d_km)\n ptimes=[]\n stimes=[]\n p_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,\n distance_in_degree=d_deg,phase_list=['P','p'])\n for p in p_arrivals:\n ptimes.append(p.time)\n s_arrivals=velmod.get_travel_times(source_depth_in_km=source_depth,\n distance_in_degree=d_deg,phase_list=['S','s'])\n for s in s_arrivals:\n stimes.append(s.time)\n tt_entry=tt_stations_1D.TTtable1D(d_km,d_deg,np.min(ptimes),np.min(stimes),np.min(stimes)-np.min(ptimes))\n tt_session.add(tt_entry)\n tt_session.commit() # Probably faster to do the commit outside of loop but oh well\n tt_session.close()\n return inv\n \ndef fb_pick(dbengine=None,picker=None,fileinput=None):\n fdir = []\n engine_assoc=dbengine\n with open(fileinput) as f:\n for line in f:\n tmp = line.split()\n fdir.append([tmp[0], tmp[1], tmp[2]])\n nsta = len(fdir)\n\n for i in range(nsta):\n Session=sessionmaker(bind=engine_assoc)\n dbsession=Session()\n st = Stream()\n st += read(fdir[i][0])\n st += read(fdir[i][1])\n st += read(fdir[i][2])\n st.merge(fill_value='interpolate')\n #print(st)\n for tr in st:\n if isinstance(tr.data, np.ma.masked_array):\n tr.data = tr.data.filled()\n st.detrend(type='linear')\n for tr in st:\n print(tr)\n scnl,picks,polarity,snr,uncert=picker.picks(tr)\n t_create=datetime.utcnow()\n for i in range(len(picks)):\n new_pick=tables1D.Pick(scnl,picks[i].datetime,polarity[i],snr[i],uncert[i],t_create)\n dbsession.add(new_pick)\n\ndef gpd_pick_add(dbsession=None,fileinput=None,inventory=None):\n filepath = fileinput\n with open(filepath) as fp:\n line = fp.readline()\n cnt = 1\n while line:\n try:\n line = fp.readline()\n #print(line)\n cnt += 1\n if len(line.split())>4:\n sta1 = line.split()[1]\n chan1 = line.split()[2]\n #print(sta1,chan1)\n #scnl.station = sta1\n net1 = line.split()[0]\n scnl = SCNL([sta1,chan1,net1])\n #print(scnl.channel)\n type1 = line.split()[3]\n scnl.phase = type1\n #print(scnl.phase)\n time1 = UTCDateTime(line.split()[4]).datetime\n else:\n sta1 = line.split()[0]\n chan1 = line.split()[1]\n #print(sta1,chan1)\n #scnl.station = sta1\n #net1 = line.split()[0]\n try:\n net1 = inventory.select(station=sta1)[0].code\n except:\n net1 = 'OK'\n pass\n scnl = SCNL([sta1,chan1,net1])\n #print(scnl.channel)\n type1 = line.split()[2]\n scnl.phase = type1\n #print(scnl.phase)\n time1 = UTCDateTime(line.split()[3]).datetime\n t_create=datetime.utcnow()\n new_pick=tables1D.Pick(scnl,time1,'',10,0.1,t_create)\n #tables1D.Pick.phase=type1\n dbsession.add(new_pick) # Add pick i to the database\n dbsession.commit() #\n except:\n pass\n\n# def gpd_pick_add(dbsession=None,fileinput=None):\n# filepath = fileinput\n# with open(filepath) as fp:\n# line = fp.readline()\n# cnt = 1\n# while line:\n# try:\n# print(\"Line {}: {}\".format(cnt, line.strip()))\n# line = fp.readline()\n# cnt += 1\n# sta1 = line.split()[1]\n# chan1 = line.split()[2]\n# #print(sta1,chan1)\n# #scnl.station = sta1\n# net1 = line.split()[0]\n# scnl = SCNL([sta1,chan1,'OK'])\n# #print(scnl.channel)\n# type1 = line.split()[3]\n# scnl.phase = type1\n# time1 = UTCDateTime(line.split()[4]).datetime\n# t_create=datetime.utcnow()\n \n# new_pick=tables1D.Pick(scnl,time1,'',10,0.1,t_create)\n# dbsession.add(new_pick) # Add pick i to the database\n# dbsession.commit() #\n# except:\n# pass\n \ndef get_chan1(stationfile):\n if len(list(filter(None, 
stationfile.split('/')[-1].split('.'))))==5:\n comp = list(filter(None, stationfile.split('/')[-1].split('.')))[3][2]\n else:\n comp = list(filter(None, stationfile.split('/')[-1].split('.')))[2][2]\n return comp\n\ndef get_chan3(stationfile):\n if len(list(filter(None, stationfile.split('/')[-1].split('.'))))==5:\n comp3 = list(filter(None, stationfile.split('/')[-1].split('.')))[3][0:3]\n else:\n comp3 = list(filter(None, stationfile.split('/')[-1].split('.')))[2][0:3]\n return comp3\n \ndef detection_continuous(dirname=None, project_folder=None, project_code=None, local=True, machine=True,single_date=None, latitude=None, longitude=None, max_radius=None):\n# starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0')\n# stopping = starting + 86430\n starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0')\n stopping = starting + 86430\n dir1 = project_folder+'/'+dirname\n #print(single_date.strftime(\"%Y%m%d\"))\n #print(dir1+'/1dassociator_'+project_code+'.db')\n if os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):\n os.remove(dir1+'/1dassociator_'+project_code+'.db')\n db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db' \n# if os.path.exists(dir1+'/tt_ex_1D_'+project_code+'.db'):\n# os.remove(dir1+'/tt_ex_1D_'+project_code+'.db')\n# db_tt='sqlite:///'+dir1+'/tt_ex_1D_'+project_code+'.db' # Traveltime database44.448,longitude=-115.136\n# print(db_tt)\n# if local:\n# inventory = build_tt_tables_local_directory(dirname=dirname,project_folder=project_folder,channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)\n# else:\n# inventory = build_tt_tables(lat1=latitude,long1=longitude,maxrad=max_radius,starting=starting, stopping=stopping, channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)\n engine_assoc=create_engine(db_assoc, echo=False)\n tables1D.Base.metadata.create_all(engine_assoc)\n Session=sessionmaker(bind=engine_assoc)\n session=Session()\n filelist = glob.glob(dir1+'/*mseed') or glob.glob(dir1+'/*SAC')\n stations = set()\n for file1 in filelist:\n station = file1.split('.')[1]\n net = file1.split('.')[0].split('/')[-1]\n netsta = net+'.'+station\n print(file1.split('.')[1])\n stations.add(netsta)\n #### create infile\n day_strings = []\n for stationin in stations:\n station3 = glob.glob(dir1+'/*'+stationin+'.*mseed') or glob.glob(dir1+'/*'+stationin+'.*SAC')\n station3a = [None,None,None]\n if len(station3)>3:\n #print(station3)\n ind1 = np.empty((len(station3),1))\n ind1[:] = np.nan\n for idxs, station1 in enumerate(station3):\n if get_chan3(station1) == 'HHZ':\n ind1[idxs] = 2\n elif get_chan3(station1) == 'HHN' or get_chan3(station1) == 'HH1':\n ind1[idxs] = 0\n elif get_chan3(station1) == 'HHE' or get_chan3(station1) == 'HH2':\n ind1[idxs] = 1\n #print(idxs)\n #if ind1:\n # station3a[ind1] = station1\n #ind2 = np.argwhere(~np.isnan(ind1))[:,0]\n for idxsa, ind2a in enumerate(ind1):\n if ~np.isnan(ind2a[0]):\n #print(ind2a)\n #print(station3a)\n station3a[int(ind2a[0])] = station3[idxsa]\n else:\n for station1 in station3:\n if get_chan1(station1) == 'Z':\n ind1 = 2\n elif get_chan1(station1) == 'N' or get_chan1(station1) == '1':\n ind1 = 0\n elif get_chan1(station1) == 'E' or get_chan1(station1) == '2':\n ind1 = 1\n #print(ind1)\n station3a[ind1] = station1\n if any(elem is None for elem in station3a):\n continue\n day_strings.append((station3a[0]+' '+station3a[1]+' 
'+station3a[2]))\n \n day_string = \"\\n\".join(day_strings)\n \n with open(dir1+'/dayfile.in', \"w\") as open_file:\n open_file.write(day_string)\n infile = dir1+'/dayfile.in'\n outfile = dir1+'/gpd_picks.out'\n #gpd_predict.py -V -P -I infile -O outflie\n #os.system(\"gpd_predict.py -V -P -I %s -O %s\")%(infile, outfile)\n #gpd_predict(inputfile=infile,outputfile=outfile)\n fileinassociate = outfile\n \n if local:\n inv = Inventory()\n dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')\n for file1 in dir1a:\n inv1a = read_inventory(file1)\n inv.networks.extend(inv1a)\n else:\n fdsnclient=Client()\n inv=fdsnclient.get_stations(starttime=starting,endtime=stopping,latitude=latitude,longitude=longitude,maxradius=max_radius,channel='*HZ',level='channel')\n if machine:\n fullpath1 = pathgpd+'/gpd_predict.py'\n os.system(fullpath1+\" -V -P -I %s -O %s -F %s\" % (infile, outfile, pathgpd))\n gpd_pick_add(dbsession=session,fileinput=fileinassociate,inventory=inv)\n else:\n picker = fbpicker.FBPicker(t_long = 5, freqmin = 1, mode = 'rms', t_ma = 20, nsigma = 7, t_up = 0.7, nr_len = 2, nr_coeff = 2, pol_len = 10, pol_coeff = 10, uncert_coeff = 3)\n fb_pick(dbengine=engine_assoc,picker=picker,fileinput=infile)\n# assocXX=assoc1D.LocalAssociator(db_assoc, db_tt, max_km = maxkm, aggregation = 1, aggr_norm = 'L2', cutoff_outlier = 10, assoc_ot_uncert = 3, nsta_declare = 4, loc_uncert_thresh = 0.5)\n# print(\"aggregate\")\n# t0=datetime.utcnow()\n# # Identify candidate events (Pick Aggregation)\n# assocXX.id_candidate_events()\n# t1=datetime.utcnow()\n# print('Took '+str(t1-t0))\n# print(\"associate\")\n# # Associate events\n# assocXX.associate_candidates()\n# t2=datetime.utcnow()\n# print('Took '+str(t2-t1))\n# # Add singles stations to events\n# assocXX.single_phase()\n\ndef association_continuous(dirname=None, project_folder=None, project_code=None, maxdist = None, maxkm=None, single_date=None, local=True, latitude=None, longitude=None, max_radius=None):\n starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0')\n stopping = starting + 86430\n\n dir1 = project_folder+'/'+dirname\n print(single_date.strftime(\"%Y%m%d\"))\n #print(dir1+'/1dassociator_'+project_code+'.db')\n# if os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):\n# os.remove(dir1+'/1dassociator_'+project_code+'.db')\n# db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db' \n if os.path.exists(dir1+'/tt_ex_1D_'+project_code+'.db'):\n os.remove(dir1+'/tt_ex_1D_'+project_code+'.db')\n db_tt='sqlite:///'+dir1+'/tt_ex_1D_'+project_code+'.db' # Traveltime database44.448,longitude=-115.136\n print(db_tt)\n if local:\n inventory = build_tt_tables_local_directory(dirname=dirname,project_folder=project_folder,channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)\n else:\n inventory = build_tt_tables(lat1=latitude,long1=longitude,maxrad=max_radius,starting=starting, stopping=stopping, channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)\n inventory.write(dir1+'/dailyinventory.xml',format=\"STATIONXML\")\n if not os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):\n db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'\n engine_assoc=create_engine(db_assoc, echo=False)\n tables1D.Base.metadata.create_all(engine_assoc)\n Session=sessionmaker(bind=engine_assoc)\n session=Session()\n gpd_pick_add(dbsession=session,fileinput=dir1+'/gpd_picks.out',inventory=inventory)\n\n 
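# run the 1D phase associator below: aggregate picks into candidate events, associate them, then add single-phase stations\n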
db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'\n assocXX=assoc1D.LocalAssociator(db_assoc, db_tt, max_km = maxkm, aggregation = 1, aggr_norm = 'L2', cutoff_outlier = 10, assoc_ot_uncert = 3, nsta_declare = 4, loc_uncert_thresh = 0.2)\n print(\"aggregate\")\n t0=datetime.utcnow()\n # Identify candidate events (Pick Aggregation)\n assocXX.id_candidate_events()\n t1=datetime.utcnow()\n print('Took '+str(t1-t0))\n print(\"associate\")\n # Associate events\n assocXX.associate_candidates()\n t2=datetime.utcnow()\n print('Took '+str(t2-t1))\n # Add singles stations to events\n try:\n assocXX.single_phase()\n except:\n pass\n \n \n\n\n\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n \n return None\n\n\n\ndef hypo_station(project_folder=None, project_code=None):\n hypo71_string_sta = \"\"\n station_strings = []\n f1 = open(project_folder+'/'+'sta','w')\n #f2 = open(project_folder+'/'+'station.dat', 'w')\n #for stas in temp:\n files = sorted(glob.glob(project_folder+'/*/tt*'+project_code+'.db')) or glob.glob(project_folder+'/tt*'+project_code+'.db')\n #print(files)\n stas1 = pd.DataFrame()\n for dfilesta in files: \n conn1 = create_connection(dfilesta)\n with conn1:\n cur1 = conn1.cursor()\n cur1.execute(\"SELECT * FROM stations\")\n \n rows = cur1.fetchall()\n \n for row in rows:\n #print(row[0],row[1])\n #(row[0])\n df4 = pd.DataFrame()\n df4 = pd.DataFrame({'station': row[1], 'net':row[2],'latitude':row[4],'longitude':row[5],'elevation':row[6]}, index=[0])\n stas1=stas1.append(df4)\n stas1 = stas1.drop_duplicates()\n stas1 = stas1.reset_index(drop=True)\n print(stas1)\n for idx1 in stas1.index:\n stas = stas1.iloc[idx1]\n print(stas)\n# temp = stas1[stas1['station'].str.contains(sta_used)]\n# stas = temp.iloc[0]\n \n\n \n if len(stas['station'])>4:\n sta = stas['station'][1:]\n else:\n sta = stas['station']\n lon = stas['longitude']\n lon_deg = int(abs(lon))\n lon_min = (abs(lon) - abs(lon_deg)) * 60.\n lat = stas['latitude']\n lat_deg = int(abs(lat))\n lat_min = (abs(lat) - abs(lat_deg)) * 60.\n hem_NS = 'N'\n hem_EW = 'E'\n if lat < 0:\n hem_NS = 'S'\n if lon < 0:\n hem_EW = 'W'\n # hypo 71 format uses elevation in meters not kilometers\n ele = stas['elevation']\n hypo71_string_sta += fmt % (sta, lat_deg, lat_min, hem_NS,\n lon_deg, lon_min, hem_EW, ele)\n station_strings.append(\"%s %.6f %.6f %i\" % (sta, stas['latitude'], stas['longitude'], stas['elevation']))\n \n \n #print(hypo71_string_sta)\n station_string = \"\\n\".join(station_strings)\n with open(project_folder+'/'+'station.dat', \"w\") as open_file:\n open_file.write(station_string)\n f1.write(str(hypo71_string_sta))\n f1.close()\n\n\n\n\n \ndef select_all_associated(conn,f0):\n \"\"\"\n Query all rows in the associated table\n :param conn: the Connection object\n :return:\n \"\"\"\n cur1 = conn.cursor()\n cur1.execute(\"SELECT * FROM associated\")\n stalistall = set()\n rows = cur1.fetchall()\n dfs1 = pd.DataFrame()\n cat1 = Catalog()\n for rownum, row in enumerate(rows):\n #print(row)\n #(row[0])\n df4 = pd.DataFrame()\n df4 = pd.DataFrame({'Time': row[1], 'Lat':row[3],'Long':row[4]}, index=[0])\n dfs1=dfs1.append(df4)\n origin = Origin()\n origin.latitude = row[3]\n origin.longitude = row[4]\n origin.depth = 5000\n origin.time = row[1]\n origin.arrivals = []\n strday = 
row[1][0:4]+row[1][5:7]+row[1][8:10]\n cur1.execute('SELECT * FROM picks_modified WHERE assoc_id IN (?)',[int(row[0])])\n picks1a = sorted(cur1.fetchall())\n stas = []\n event = Event()\n evid = 'smi:local/Event/'+strday+str(rownum+1).zfill(3)\n orid = 'smi:local/Origin/'+strday+str(rownum+1).zfill(3)\n event.resource_id = ResourceIdentifier(id=evid)\n origin.resource_id = ResourceIdentifier(id=orid)\n\n event.resource_id = ResourceIdentifier(id='smi:local/Event/'+strday+str(rownum).zfill(3))\n origin.resource_id = ResourceIdentifier(id='smi:local/Origin/'+strday+str(rownum).zfill(3)+'_1')\n for pick1 in picks1a:\n \n #print(pick1)\n stream_id = WaveformStreamID(network_code=pick1[3], station_code=pick1[1], location_code=\"\", channel_code=pick1[2])\n p = Pick()\n p.time = pick1[5]\n p.phase_hint = pick1[6]\n p.waveform_id = stream_id\n p.evaluation_mode = 'automatic'\n pres_id = 'smi:local/Pick/'+strday+'/'+str(pick1[0])\n #res_id = ResourceIdentifier(prefix='Pick')\n #res_id.convert_id_to_quakeml_uri(authority_id='obspy.org')\n p.resource_id = ResourceIdentifier(id=pres_id)\n #print(p)\n \n a = Arrival()\n #a.time = pick1[5]\n a.phase = pick1[6]\n a.pick_id = p.resource_id\n ares_id = 'smi:local/Arrival/'+strday+'/'+str(pick1[0])\n #res_id = ResourceIdentifier(prefix='Pick')\n #res_id.convert_id_to_quakeml_uri(authority_id='obspy.org')\n a.resource_id = ResourceIdentifier(id=ares_id)\n a.time_weight = 1.0\n #print(a)\n \n #origin.picks.append(p)\n sta1 = pick1[1]\n stas.append(sta1)\n stalistall.add(sta1)\n origin.arrivals.append(a)\n event.picks.append(p)\n event.origins.append(origin)\n cat1.append(event)\n #print(stalistall)\n stalist = list(set(stas))\n for states in stalist:\n hypo71_string = \"\"\n numP = -9\n numS = -9\n #print(states)\n for num, line in enumerate(picks1a):\n if states in line and 'P' in line:\n numP = num\n if states in line and 'S' in line:\n numS = num\n# if numP > -1 and numS < -1:\n# #print('just P'+str(numP))\n# if numP < -1 and numS > -1:\n# #print('just S')\n# if numP > -1 and numS > -1:\n# print('both'+str(numP)+' '+str(numS))\n if len(states)>4:\n sta = states[1:]\n else:\n sta = states\n if numP > -1:\n pick = picks1a[numP]\n \n \n \n t = UTCDateTime(pick[5])\n hundredth = int(round(t.microsecond / 1e4))\n if hundredth == 100:\n t_p = t + 1\n hundredth = 0\n else:\n t_p = t\n date = t_p.strftime(\"%y%m%d%H%M%S\") + \".%02d\" % hundredth\n onset = 'I'\n polarity = '?'\n weight = 1\n #print(sta,onset,polarity,weight,date)\n hypo71_string += fmtP % (sta, onset, polarity, weight, date)\n #f0.write(str(hypo71_string))\n \n #print(hypo71_string)\n if numP > -1 and numS > -1:\n pick = picks1a[numS]\n #t = UTCDateTime(pick[5])\n t2 = UTCDateTime(pick[5])\n # if the S time's absolute minute is higher than that of the\n # P pick, we have to add 60 to the S second count for the\n # hypo 2000 output file\n # +60 %60 is necessary if t.min = 57, t2.min = 2 e.g.\n mindiff = (t2.minute - t.minute + 60) % 60\n abs_sec = t2.second + (mindiff * 60)\n hundredth = int(round(t2.microsecond / 1e4))\n if hundredth == 100:\n abs_sec += 1\n hundredth = 0\n date2 = \"%s.%02d\" % (abs_sec, hundredth)\n hundredth = int(round(t.microsecond / 1e4))\n if hundredth == 100:\n t_p = t + 1\n hundredth = 0\n else:\n t_p = t\n date = t_p.strftime(\"%y%m%d%H%M%S\") + \".%02d\" % hundredth\n onset = 'I'\n polarity = '?'\n weight = 1\n #print(sta,onset,polarity,weight,date)\n hypo71_string += fmtS % (date2, onset, polarity,weight)\n \n else:\n hypo71_string += \"\\n\"\n #f0.write(\"\\n\")\n 
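# write this station's accumulated HYPO71 phase card(s) for the current event to the phase file\n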
f0.write(str(hypo71_string))\n #cur1.execute('SELECT * FROM picks WHERE id IN (?)',[int(pick[0])])\n #extrapick = cur1.fetchall()\n f0.write(\"\\n\")\n \n \n return dfs1, stalistall, cat1, f0\n\ndef combine_associated(project_folder=None, project_code=None, catalog_year=False, year=None, eventmode=False):\n #files = sorted(glob.glob('/data/tx/ContWaveform/*/1dass*'++'.db'))\n #files = [f for f in os.listdir(dirdata) if os.path.isfile(os.path.join(dirdata, f))]\n #dir1 = project_folder+'/'+dirname\n \n hypo_station(project_folder, project_code)\n files = sorted(glob.glob(project_folder+'/*/1dass*'+project_code+'.db'))\n if catalog_year:\n files = sorted(glob.glob(project_folder+'/'+str(year)+'*/1dass*'+project_code+'.db'))\n if eventmode:\n files = sorted(glob.glob(project_folder+'/1dass*'+project_code+'.db'))\n f0 = open(project_folder+'/pha_'+project_code,'w')\n dfs2 = pd.DataFrame()\n stalistall1 = []\n cat = Catalog()\n for dfile in files: \n # create a database connection\n print(dfile)\n conn = create_connection(dfile)\n \n with conn:\n #print(\"1. Query task by priority:\")\n #select_task_by_priority(conn,1)\n \n print('Day '+dfile[-6:-3])\n #try:\n \n dfs1,stalistall,cat1,f0 = select_all_associated(conn,f0)\n cat.extend(cat1)\n for stas1 in stalistall:\n if stas1 not in stalistall1:\n stalistall1.append(stas1)\n dfs2 = dfs2.append(dfs1)\n# except:\n# pass\n f0.close()\n if catalog_year:\n cat.write(project_folder+'/'+project_code+'_'+str(year)+'_cat.xml',format=\"QUAKEML\")\n else:\n if not eventmode:\n cat.write(project_folder+'/'+project_code+'_cat.xml',format=\"QUAKEML\")\n return cat, dfs2\n\n\n\n\n\n#\n# index0=int(round((picks[i]-self.tr.stats.starttime)/dt,0))\n# index=index0\n# \n# # roll forward index+=1\n# while True:\n# if index>=self.stats.npts-1-2:\n# break\n# elif (self.tr[index+1]-self.tr[index])*(self.tr[index+2]-self.tr[index+1])>0:\n# index+=1\n# else:\n# break\n# \n# # notice index+1, rolling stop one point before extreme, compare with std to avoid very small \n# if self.tr[index+1] - self.tr[index0] > 0 and abs(self.tr[index+1] - self.tr[index0]) > self.picker.pol_coeff * np.std(self.tr[index0 - self.picker.pol_len: index0]):\n# polarity='C'\n# elif self.tr[index+1] - self.tr[index0] < 0 and abs(self.tr[index+1] - self.tr[index0]) > self.picker.pol_coeff * np.std(self.tr[index0 - self.picker.pol_len: index0]):\n# polarity='D'\n# else: \n# polarity=''\ndef polarity(tr,pickP=None):\n dt=tr.stats.delta\n #t = np.arange(0, tr.stats.npts/tr.stats.sampling_rate, dt) \n index0=int(round((pickP-tr.stats.starttime)/dt,0))\n index=index0\n pol_coeff = 5\n pol_len = 5\n polarity = 'undecidable'\n while True:\n if index>=tr.stats.npts-1-2:\n break\n elif (tr[index+1]-tr[index])*(tr[index+2]-tr[index+1])>0:\n index+=1\n else:\n break\n if tr[index+1] - tr[index0] > 0 and abs(tr[index+1] - tr[index0]) > pol_coeff * np.std(tr[index0 - pol_len: index0]):\n polarity='positive'\n elif tr[index+1] - tr[index0] < 0 and abs(tr[index+1] - tr[index0]) > pol_coeff * np.std(tr[index0 - pol_len: index0]):\n polarity='negative'\n else: \n polarity='undecidable'\n return polarity\n \n \ndef magnitude_quakeml(cat=None, project_folder=None,plot_event=False,eventmode=False):\n# PAZ_WA = {'poles': [-6.283 + 4.7124j, -6.283 - 4.7124j],\n# 'zeros': [0 + 0j], 'gain': 1.0, 'sensitivity': 2080}\n paz_wa = {'sensitivity': 2080, 'zeros': [0j], 'gain': 1,'poles': [-6.2832 - 4.7124j, -6.2832 + 4.7124j]}\n \n print('Computing magnitudes')\n# \n# #inventory = client.get_stations(starttime=starttime, 
endtime=endtime, network=\"*\", sta=tr.stats.station, loc=\"*\", channel=tr.stats.channel,level=\"response\")\n client = Client()\n# starttime = UTCDateTime(start)\n# endtime = UTCDateTime(end)\n\n ##import datetime\n #inva = client.get_stations(latitude=36.5,longitude=-98.9,minradius=0.0, maxradius=5,starttime=starttime,endtime=endtime,level=\"station\")\n #invsta = client.get_stations(latitude=31.699,longitude=-104.053, maxradius=4,starttime=starttime,endtime=endtime,includeavailability=True,includerestricted=False,level=\"response\")\n# if download_inventory:\n# invsta = client.get_stations(latitude=31.699,longitude=-104.053, maxradius=4,starttime=starttime,endtime=endtime,includeavailability=True,includerestricted=False,level=\"response\")\n# else:\n# inv = Inventory()\n# dir1a = glob.glob(project_folder+'/*/*xml')\n# for file1 in dir1a:\n# inv1a = read_inventory(file1)\n# inv.networks.extend(inv1a)\n #cat3 = cat.filter(\"time > 2020-03-25T03:10\",\"time < 2020-03-25T03:50\")\n for event in cat:\n origin = event.origins[0]\n print(origin)\n event_lat = origin.latitude\n event_lon = origin.longitude\n strday = str(origin.time.year).zfill(2)+str(origin.time.month).zfill(2)+str(origin.time.day).zfill(2)\n if eventmode:\n strday = str(project_folder.split('/')[-1])\n # strday = str(origin.time.year).zfill(2)+str(origin.time.month).zfill(2)+str(origin.time.day).zfill(2)\n print(strday)\n strdaytime = strday+str(origin.time.hour).zfill(2)+str(origin.time.minute).zfill(2)[0]\n mags = []\n mags_iaspei = []\n\n st2 = Stream()\n for idx1, pick in enumerate(event.picks):\n if pick.phase_hint == 'S':\n ### make Amplitude\n try:\n st3 = read(project_folder+'/'+strday+'*/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'*mseed',debug_headers=True)\n except:\n try:\n st3 = read(project_folder+'/'+strday+'*/*.'+pick.waveform_id.station_code+'*SAC',debug_headers=True)\n except:\n #st3 = read(project_folder+'/scratch/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'*mseed',debug_headers=True)\n st3 = read(project_folder+'/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'*mseed',debug_headers=True)\n# try:\n# st3 = read(project_folder+'/'+strday+'*/*.'+pick.waveform_id.station_code+'*SAC',debug_headers=True)\n# except:\n# st3 = read(project_folder+'/'+strdaytime+'*/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'*mseed',debug_headers=True)\n pass\n\n # pazs = glob.glob('/data/tx/ContWaveform/'+strday+'/SACPZ.'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'*[EN12]')\n #st = read(project_folder+'/'+strday+'*/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'*[EN12]*.SAC',debug_headers=True)\n try:\n st3.merge(fill_value='interpolate')\n print(st3)\n for tr in st3:\n if isinstance(tr.data, np.ma.masked_array):\n tr.data = tr.data.filled()\n st = st3.select(channel='[HB]H[EN12]')\n for tr in st3:\n inventory_local = glob.glob(project_folder+'/'+strday+'*/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'.xml')\n if len(inventory_local)>0:\n inv = read_inventory(inventory_local[0])\n else:\n if len(glob.glob(project_folder+'/'+strday+'*/'+pick.waveform_id.network_code+'.'+pick.waveform_id.station_code+'.xml'))>0:\n inv0 = read_inventory(project_folder+'/'+strday+'*/dailyinventory.xml')\n inv = inv0.select(network=pick.waveform_id.network_code, station=pick.waveform_id.station_code, time=origin.time)\n else:\n print('Getting response from DMC')\n starttime = UTCDateTime(origin.time-10)\n 
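# no local response found, so query the FDSN web service (IRIS by default) for metadata spanning the origin time\n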
endtime = UTCDateTime(origin.time+10)\n inv = client.get_stations(starttime=starttime, endtime=endtime, network=\"*\", sta=tr.stats.station, loc=\"*\", channel=tr.stats.channel,level=\"response\")\n # paz = [x for x in pazs if tr.stats.channel in x]\n # attach_paz(tr, paz[0])\n #inv = client.get_stations(starttime=starttime, endtime=endtime, network=\"*\", sta=tr.stats.station, loc=\"*\", channel=tr.stats.channel,level=\"response\")\n tr.stats.network = inv[0].code\n tr.stats.location = inv[0][0][0].location_code\n pre_filt = (0.05, 0.06, 30.0, 35.0)\n tr.trim(pick.time-30, pick.time+120)\n \n \n #tr.demean()\n tr.detrend()\n tr.remove_response(inventory=inv, output='VEL', pre_filt=pre_filt, zero_mean=True)\n #tr.data = seis_sim(tr.data, tr.stats.sampling_rate,paz_remove=None, paz_simulate=paz_wa, water_level=10)\n tr.simulate(paz_simulate=paz_wa, water_level=10)\n \n \n #tr = tr.filter('bandpass', freqmin=fminbp, freqmax=fmaxbp, zerophase=True)\n #st.trim(pick.time-5,pick.time+10)\n tr1 = st3.select(channel='[EHB]HZ')[0]\n \n sta_lat = inv[0][0].latitude \n sta_lon = inv[0][0].longitude\n epi_dist, az, baz = gps2dist_azimuth(event_lat, event_lon, sta_lat, sta_lon)\n epi_dist = epi_dist / 1000\n tr1.stats.distance = gps2dist_azimuth(event_lat, event_lon, sta_lat, sta_lon)[0]\n tr1.trim(pick.time-20,pick.time+60)\n st2 += tr1\n st.trim(pick.time-1,pick.time+5)\n ampls = (max(abs(st[0].data)), max(abs(st[1].data)))\n for idx2,ampl in enumerate(ampls):\n\n amp = Amplitude()\n res_id = 'smi:local/Amplitude/'+strday+'/'+str(10*idx2+idx1)\n #res_id = ResourceIdentifier(prefix='Amplitude')\n #res_id.convert_id_to_quakeml_uri(authority_id='obspy.org')\n amp.resource_id = ResourceIdentifier(id=res_id)\n amp.pick_id = pick.resource_id\n amp.waveform_id = pick.waveform_id\n amp.type = 'ML'\n amp.generic_amplitude = ampl\n amp.evaluation_mode = 'automatic'\n \n if epi_dist < 60:\n a = 0.018\n b = 2.17\n else:\n a = 0.0038\n b = 3.02\n ml = np.log10(ampl * 1000) + a * epi_dist + b\n \n ml_iaspei = np.log10(ampl*1e6)+1.11*np.log10(epi_dist) + 0.00189*epi_dist - 2.09\n print(ml, ml_iaspei)\n\n if epi_dist < 160:\n mags.append(ml)\n mags_iaspei.append(ml_iaspei)\n #### make StationMagnitude\n stamag = StationMagnitude()\n res_id = 'smi:local/StationMagnitude/'+strday+'/'+str(10*idx2+idx1)\n #res_id = ResourceIdentifier(prefix='StationMagnitude')\n #res_id.convert_id_to_quakeml_uri(authority_id='obspy.org')\n stamag.resource_id = ResourceIdentifier(id=res_id)\n stamag.origin_id = origin.resource_id\n stamag.waveform_id = pick.waveform_id\n stamag.mag = ml\n stamag.station_magnitude_type = 'ML'\n stamag.amplitude_id = amp.resource_id\n ## add them to the event\n event.station_magnitudes.append(stamag)\n event.amplitudes.append(amp)\n except:\n print('Something went wrong here')\n pass\n \n for pick in event.picks:\n if pick.phase_hint == 'P':\n tr = st2.select(station=pick.waveform_id.station_code)\n try:\n tr = tr[0]\n pol = polarity(tr,pick.time)\n pick.polarity = pol\n print(pol)\n except:\n pass\n \n \n \n \n \n netmag = np.median(mags_iaspei)\n try:\n m = Magnitude()\n m.mag = netmag\n m.mag_errors = {\"uncertainty\": np.std(mags_iaspei)}\n m.magnitude_type = 'ML'\n m.origin_id = origin.resource_id\n meth_id = 'smi:local/median'\n m.method_id = meth_id\n m.station_count = len(mags_iaspei)\n m_id = 'smi:local/Magnitude/'+strday+'/'+str(idx1)\n #m_id = ResourceIdentifier(prefix='StationMagnitude')\n #m_id.convert_id_to_quakeml_uri(authority_id='obspy.org')\n m.resource_id = m_id\n 
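# attach the network magnitude (median of the per-station IASPEI ML values computed above) to the event\n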
event.magnitudes.append(m)\n \n \n \n event.preferred_magnitude_id = event.magnitudes[0].resource_id\n event.preferred_origin_id = event.origins[0].resource_id\n \n \n \n if plot_event:\n dir1a = glob.glob(project_folder+'/'+strday+'*')\n filename = dir1a[0]+'/'+strdaytime\n fig = plt.figure()\n st2.filter('highpass', freq=.1, zerophase=True)\n st2.plot(type='section', scale=2,plot_dx=100e3, recordlength=50,\n time_down=True, linewidth=.25, grid_linewidth=.25, show=False, \n outfile=filename,fig=fig)\n plt.close()\n except:\n print('Magnitude failed')\n pass\n if not eventmode:\n cat.write(project_folder+'/cat.xml',format=\"QUAKEML\")\n return cat\n\n\n\ndef single_event_xml(catalog=None,project_folder=None, format=\"QUAKEML\"):\n xmlspath = project_folder+'/'+format.lower()\n if not os.path.exists(xmlspath):\n os.makedirs(xmlspath)\n for ev in catalog:\n filename = str(ev.resource_id).split('/')[-1] + \".xml\"\n ev.write(xmlspath+'/'+filename, format=format)\n \ndef detection_assocation_event(project_folder=None, project_code=None, maxdist = None, maxkm=None, local=True, machine=True, latitude=None, longitude=None, max_radius=None, approxorigintime=None,downloadwaveforms=True):\n approxotime = UTCDateTime(approxorigintime)\n dirname = str(approxotime.year)+str(approxotime.month).zfill(2)+str(approxotime.day).zfill(2)+str(approxotime.hour).zfill(2)+str(approxotime.minute).zfill(2)+str(approxotime.second).zfill(2)\n #starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0') - \n starting = approxotime - 60\n stopping = approxotime + 60\n dir1 = project_folder+'/'+dirname\n \n if downloadwaveforms:\n download_mseed_event_radial(dirname=dirname, project_folder=project_folder, starting=starting, stopping = stopping, lat1=latitude, lon1=longitude, maxrad=max_radius)\n #print(single_date.strftime(\"%Y%m%d\"))\n #print(dir1+'/1dassociator_'+project_code+'.db')\n if os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):\n os.remove(dir1+'/1dassociator_'+project_code+'.db')\n db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'\n engine_assoc=create_engine(db_assoc, echo=False)\n tables1D.Base.metadata.create_all(engine_assoc)\n Session=sessionmaker(bind=engine_assoc)\n session=Session()\n filelist = glob.glob(dir1+'/*mseed') or glob.glob(dir1+'/*SAC')\n stations = set()\n for file1 in filelist:\n station = file1.split('.')[1]\n net = file1.split('.')[0].split('/')[-1]\n netsta = net+'.'+station\n print(file1.split('.')[1])\n stations.add(netsta)\n #### create infile\n day_strings = []\n for stationin in stations:\n station3 = glob.glob(dir1+'/*'+stationin+'.*mseed') or glob.glob(dir1+'/*'+stationin+'.*SAC')\n station3a = [None,None,None]\n if len(station3)>3:\n #print(station3)\n ind1 = np.empty((len(station3),1))\n ind1[:] = np.nan\n for idxs, station1 in enumerate(station3):\n if get_chan3(station1) == 'HHZ':\n ind1[idxs] = 2\n elif get_chan3(station1) == 'HHN' or get_chan3(station1) == 'HH1':\n ind1[idxs] = 0\n elif get_chan3(station1) == 'HHE' or get_chan3(station1) == 'HH2':\n ind1[idxs] = 1\n #print(idxs)\n #if ind1:\n # station3a[ind1] = station1\n #ind2 = np.argwhere(~np.isnan(ind1))[:,0]\n for idxsa, ind2a in enumerate(ind1):\n if ~np.isnan(ind2a[0]):\n #print(ind2a)\n #print(station3a)\n station3a[int(ind2a[0])] = station3[idxsa]\n else:\n for station1 in station3:\n if get_chan1(station1) == 'Z':\n ind1 = 2\n elif get_chan1(station1) == 'N' or get_chan1(station1) == '1':\n ind1 = 0\n elif 
get_chan1(station1) == 'E' or get_chan1(station1) == '2':\n ind1 = 1\n #print(ind1)\n station3a[ind1] = station1\n if any(elem is None for elem in station3a):\n continue\n day_strings.append((station3a[0]+' '+station3a[1]+' '+station3a[2]))\n \n day_string = \"\\n\".join(day_strings)\n \n with open(dir1+'/dayfile.in', \"w\") as open_file:\n open_file.write(day_string)\n infile = dir1+'/dayfile.in'\n outfile = dir1+'/gpd_picks.out'\n fileinassociate = outfile\n \n if local:\n inv = Inventory()\n dir1a = glob.glob(project_folder+'/'+dirname+'/*xml')\n for file1 in dir1a:\n inv1a = read_inventory(file1)\n inv.networks.extend(inv1a)\n else:\n fdsnclient=Client()\n inv=fdsnclient.get_stations(starttime=starting,endtime=stopping,latitude=latitude,longitude=longitude,maxradius=max_radius,channel='*HZ',level='channel')\n if machine:\n fullpath1 = pathgpd+'/gpd_predict.py'\n os.system(fullpath1+\" -V -P -I %s -O %s -F %s\" % (infile, outfile, pathgpd))\n gpd_pick_add(dbsession=session,fileinput=fileinassociate,inventory=inv)\n else:\n picker = fbpicker.FBPicker(t_long = 5, freqmin = 1, mode = 'rms', t_ma = 20, nsigma = 7, t_up = 0.7, nr_len = 2, nr_coeff = 2, pol_len = 10, pol_coeff = 10, uncert_coeff = 3)\n fb_pick(dbengine=engine_assoc,picker=picker,fileinput=infile) # starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0')\n # stopping = starting + 86430\n\n\n# starting = UTCDateTime(single_date.strftime(\"%Y\")+'-'+single_date.strftime(\"%m\")+'-'+single_date.strftime(\"%d\")+'T00:00:00.0')\n# stopping = starting + 86430\n\n dir1 = project_folder+'/'+dirname\n \n if os.path.exists(dir1+'/tt_ex_1D_'+project_code+'.db'):\n os.remove(dir1+'/tt_ex_1D_'+project_code+'.db')\n db_tt='sqlite:///'+dir1+'/tt_ex_1D_'+project_code+'.db' # Traveltime database44.448,longitude=-115.136\n print(db_tt)\n if local:\n inventory = build_tt_tables_local_directory(dirname=dirname,project_folder=project_folder,channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)\n else:\n inventory = build_tt_tables(lat1=latitude,long1=longitude,maxrad=max_radius,starting=starting, stopping=stopping, channel_codes=['EH','BH','HH'],db=db_tt,maxdist=maxdist,source_depth=5.)\n inventory.write(dir1+'/dailyinventory.xml',format=\"STATIONXML\")\n if not os.path.exists(dir1+'/1dassociator_'+project_code+'.db'):\n db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'\n engine_assoc=create_engine(db_assoc, echo=False)\n tables1D.Base.metadata.create_all(engine_assoc)\n Session=sessionmaker(bind=engine_assoc)\n session=Session()\n gpd_pick_add(dbsession=session,fileinput=dir1+'/gpd_picks.out',inventory=inventory)\n\n db_assoc='sqlite:///'+dir1+'/1dassociator_'+project_code+'.db'\n assocXX=assoc1D.LocalAssociator(db_assoc, db_tt, max_km = maxkm, aggregation = 1, aggr_norm = 'L2', cutoff_outlier = 10, assoc_ot_uncert = 3, nsta_declare = 4, loc_uncert_thresh = 0.2)\n print(\"aggregate\")\n t0=datetime.utcnow()\n # Identify candidate events (Pick Aggregation)\n assocXX.id_candidate_events()\n t1=datetime.utcnow()\n print('Took '+str(t1-t0))\n print(\"associate\")\n # Associate events\n assocXX.associate_candidates()\n t2=datetime.utcnow()\n print('Took '+str(t2-t1))\n # Add singles stations to events\n try:\n assocXX.single_phase()\n except:\n pass\n \n cat, dfs = combine_associated(project_folder=dir1, project_code=project_code, eventmode=True)\n cat = magnitude_quakeml(cat=cat, project_folder=dir1,plot_event=True, eventmode=True)\n 
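# write each associated event to its own QuakeML file, named after the event directory and index\n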
#cat.write('catalog_idaho.xml',format='QUAKEML')\n #single_event_xml(cat,dir1,\"QUAKEML\")\n for idx1, ev in enumerate(cat):\n filename = dirname+'_'+str(idx1) + \".xml\"\n ev.write(project_folder+'/'+filename, format='QUAKEML')\n \n \ndef simple_cat_df(cat=None):\n times = []\n lats = []\n lons = []\n deps = []\n magnitudes = []\n magnitudestype = []\n resourceid = []\n for event in cat:\n if len(event.origins) != 0:\n origin1 = event.preferred_origin() or event.origins[0]\n times.append(origin1.time.datetime)\n lats.append(origin1.latitude)\n lons.append(origin1.longitude)\n deps.append(origin1.depth)\n if event.preferred_magnitude() is not None:\n magnitudes.append(event.preferred_magnitude().mag)\n magnitudestype.append(event.preferred_magnitude().magnitude_type)\n else:\n if len(event.magnitudes)>0:\n magnitudes.append(event.magnitudes[0].mag)\n magnitudestype.append(event.magnitudes[0].magnitude_type)\n else:\n magnitudes.append(np.nan)\n magnitudestype.append(np.nan)\n resourceid.append(event.resource_id)\n catdf1 = pd.DataFrame({'latitude':lats,'longitude':lons, 'depth':deps,'magnitude':magnitudes,'type':magnitudestype,'id':resourceid}, index = times)\n return catdf1\n\ndef catdf_narrowbounds(catdf=None,lat_a=None,lat_b=None,lon_a=None,lon_b=None):\n catdf = catdf[(catdf['latitude']>lat_a) & (catdf['latitude']<lat_b) & (catdf['longitude']>lon_a) & (catdf['longitude']<lon_b)]\n return catdf\n\n#\n#\ndef quakeml_to_hypodd(cat=None, project_folder=None, project_code=None):\n #catdf = simple_cat_df(cat)\n phase_dat_file = project_folder+'/'+project_code+'.pha'\n #for idx0, t0 in enumerate(catdf.index):\n #stations = []\n stations = set()\n\n event_strings = [] \n for idx1, event in enumerate(cat):\n evo = event.origins[0].time\n #evid = catdf['event_id'][idx1]\n #otime = UTCDateTime(evo)\n #try:\n \n origin = event.preferred_origin() or event.origins[0]\n mag1 = event.preferred_magnitude() or event.magnitudes[0]\n magpref = mag1.mag\n\n \n \n depth_error = 0\n longitude_error = 0\n latitude_error = 0\n\n \n string = \"# {year} {month} {day} {hour} {minute} \" + \\\n \"{second:.6f} {latitude:.6f} {longitude:.6f} \" + \\\n \"{depth:.4f} {magnitude:.6f} {horizontal_error:.6f} \" + \\\n \"{depth_error:.6f} {travel_time_residual:.6f} {event_id}\"\n event_string = string.format(year=origin.time.year,\n month=origin.time.month,\n day=origin.time.day,\n hour=origin.time.hour,\n minute=origin.time.minute,\n # Seconds + microseconds\n second=float(origin.time.second) +\n (origin.time.microsecond / 1e6),\n latitude=origin.latitude,\n longitude=origin.longitude,\n # QuakeML depth is in meters. Convert to km.\n depth=origin.depth / 1000.0,\n magnitude= magpref,\n horizontal_error=max(\n [latitude_error,\n longitude_error]),\n depth_error= depth_error,\n travel_time_residual=0,\n event_id=idx1)\n event_strings.append(event_string)\n for _i, arrv in enumerate(origin.arrivals):\n pick = arrv.pick_id.get_referred_object()\n stations.add(pick.waveform_id.station_code)\n #print(pick.polarity)\n # Only P and S phases currently supported by HypoDD.\n if pick.phase_hint.upper() != \"P\" and pick.phase_hint.upper() != \"S\":\n continue\n string = \"{station_id} {travel_time:.6f} {weight:.2f} {phase}\"\n travel_time = pick.time - origin.time\n # Simple check to assure no negative travel times are used.\n if travel_time < 0:\n msg = \"Negative absolute travel time. 
\" + \\\n \"{phase} phase pick for event {event_id} at \" + \\\n \"station {station_id} will not be used.\"\n msg = msg.format(\n phase=pick.phase_hint,\n event_id=evid,\n station_id=pick.waveform_id.station_code)\n print(msg)\n continue\n phase_weighting = lambda sta_id, ph_type, time, uncertainty: 1.0\n weight = phase_weighting(pick.waveform_id.station_code, pick.phase_hint.upper(),\n pick.time,\n arrv.time_residual)\n pick_string = string.format(\n station_id=pick.waveform_id.station_code,\n travel_time=travel_time,\n weight=weight,\n phase=pick.phase_hint.upper())\n event_strings.append(pick_string)\n event_string = \"\\n\".join(event_strings)\n# except:\n# print('Some error occurred????', evo)\n# pass\n # Write the phase.dat file.\n with open(phase_dat_file, \"w\") as open_file:\n open_file.write(event_string)\n \n\n \n# station_dat_file = project_folder+'/'+'station.dat'\n# \n# #station_strings = []\n# #for key, value in self.stations.iteritems():\n# # station_strings.append(\"%s %.6f %.6f %i\" % (key, value[\"latitude\"],\n# # value[\"longitude\"], value[\"elevation\"]))\n# #station_string = \"\\n\".join(station_strings)\n# #with open(station_dat_file, \"w\") as open_file:\n# # open_file.write(station_string)\n# #self.log(\"Created station.dat input file.\")\n# \n# \n# starttime = UTCDateTime(\"2010-01-01T00:00:00.000\")\n# endtime = UTCDateTime(\"2022-01-01T00:00:00.000\")\n# #line = [(-98.15, 35.88),(-98.05, 35.8)] # Cushing area\n# \n# client = Client('IRIS')\n# inva = client.get_stations(starttime=starttime, endtime=endtime,network=\"*\", loc=\"*\", channel=\"*\",minlatitude=minlat, maxlatitude=maxlat,minlongitude=minlon, maxlongitude=maxlon,level=\"station\")\n# station_strings = []\n# for sta in stations:\n# print(sta)\n# inva1 = inva.select(station=sta)\n# if len(inva1.networks) > 0:\n# station_strings.append(\"%s %.6f %.6f %i\" % (sta, inva1[0][0].latitude, inva1[0][0].longitude, inva1[0][0].elevation))\n# #inva1[0][0].latitude\n# station_string = \"\\n\".join(station_strings)\n# with open(station_dat_file, \"w\") as open_file:\n# open_file.write(station_string)\n\n\n\n\ndef plot_hypodd_catalog(file=None): \n catdfr = pd.read_csv(file,delimiter=r\"\\s+\")\n catdfr = catdfr.dropna()\n catdfr = catdfr.reset_index(drop=True)\n #rutc = np.zeros((len(catdfr.index),1))\n rutc = []\n for i in range(0,len(catdfr.index)):\n rutc.append(UTCDateTime(int(catdfr.iloc[i,10]),int(catdfr.iloc[i,11]),int(catdfr.iloc[i,12]),int(catdfr.iloc[i,13]),int(catdfr.iloc[i,14]),catdfr.iloc[i,15]))\n \n catdfr['rutc'] = rutc\n catdfr.sort_values(by=['rutc'], inplace=True)\n catdfr = catdfr.reset_index(drop=True)\n \n \n \n \n \n from mpl_toolkits.basemap import Basemap\n # 1. Draw the map background\n #fig = plt.figure(figsize=(8, 8))\n m = Basemap(projection='lcc', resolution='h', \n lat_0=31.66, lon_0=-104,\n width=1E6, height=.6E6)\n #m.shadedrelief()\n m.drawcoastlines(color='gray')\n m.drawcountries(color='gray')\n #m.drawcounties(color='gray')\n m.drawstates(color='gray')\n \n # 2. 
scatter city data, with color reflecting population\n # and size reflecting area\n m.scatter(catdfr.iloc[:,2].values,catdfr.iloc[:,1].values,s=catdfr.iloc[:,16].values**3*8,c=catdfr.index,marker='o',alpha=0.5,latlon=True)\n \n #m.scatter(catdfo.iloc[:,2].values,catdfo.iloc[:,1].values,s=catdfo.iloc[:,16].values**3*10,c=catdfo.index,marker='o',alpha=0.5,latlon=True)\n \n \n \n cbar = plt.colorbar()\n N_TICKS=8\n indexes = [catdfr['rutc'].iloc[i].strftime('%Y-%m-%d') for i in np.linspace(0,catdfr.shape[0]-1,N_TICKS).astype(int)] \n \n #indexes = [catdfr.index[i].strftime('%Y-%m-%d') for i in np.linspace(0,catdfr.shape[0]-1,N_TICKS).astype(int)] \n cbar.ax.set_yticklabels(indexes)\n plt.show()\n plt.savefig('hypoDDmap.png')\n\nif __name__ == \"__main__\":\n easyQuake()\n\n\n\n\n\n#start_date = date(2013, 12, 1)\n#end_date = date(2013, 12, 2)\n##end_date = date(2018, 1, 2)\n#project_code = 'eq'\n#project_folder = '/data/EasyQuake'\n#for single_date in daterange(start_date, end_date):\n# print(single_date.strftime(\"%Y-%m-%d\"))\n# dirname = single_date.strftime(\"%Y%m%d\")\n #tempdate = glob.glob('/scratch/antarctica/TempWaveform/'+single_date.strftime(\"%Y%m%d\")+'*/*SAC.bp')\n #os.path.basename(temp[0])\n #stanames = []\n #for f in tempdate:\n #print(f)\n # stanames.append(os.path.basename(f).split(\".\")[1]+'.'+os.path.basename(f).split(\".\")[2])\n #stachan_uniq = set(stanames)\n", "sub_path": "easyQuake/easyQuake.py", "file_name": "easyQuake.py", "file_ext": "py", "file_size_in_byte": 65795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "phasepapy.fbpicker.__file__", "line_number": 9, "usage_type": "attribute"}, {"api_name": "phasepapy.fbpicker", "line_number": 9, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 23, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 24, "usage_type": "call"}, {"api_name": "stat.S_IEXEC", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 98, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.RectangularDomain", "line_number": 103, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.Restrictions", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 108, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.MassDownloader", "line_number": 111, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.RectangularDomain", "line_number": 118, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.Restrictions", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 123, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.MassDownloader", "line_number": 126, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.CircularDomain", "line_number": 132, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.Restrictions", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 137, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 138, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.mass_downloader.MassDownloader", "line_number": 141, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 153, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.BaseTT1D.metadata.create_all", "line_number": 154, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.BaseTT1D", "line_number": 154, "usage_type": "attribute"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 154, "usage_type": "name"}, {"api_name": "obspy.clients.fdsn.Client", "line_number": 157, "usage_type": "call"}, {"api_name": "re.match", "line_number": 166, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.Station1D", "line_number": 171, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 171, "usage_type": "name"}, {"api_name": "obspy.taup.TauPyModel", "line_number": 177, "usage_type": "call"}, {"api_name": "obspy.taup", "line_number": 177, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 179, "usage_type": "call"}, {"api_name": "obspy.geodetics.kilometer2degrees", "line_number": 181, "usage_type": "call"}, {"api_name": "obspy.geodetics", "line_number": 181, "usage_type": "name"}, {"api_name": "phasepapy.tt_stations_1D.TTtable1D", "line_number": 192, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 192, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 202, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.BaseTT1D.metadata.create_all", "line_number": 203, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.BaseTT1D", "line_number": 203, "usage_type": "attribute"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 203, "usage_type": "name"}, {"api_name": "obspy.Inventory", "line_number": 206, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 207, "usage_type": "call"}, {"api_name": "obspy.read_inventory", "line_number": 209, "usage_type": "call"}, {"api_name": "re.match", "line_number": 217, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.Station1D", "line_number": 222, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 222, "usage_type": "name"}, {"api_name": "obspy.taup.TauPyModel", "line_number": 228, "usage_type": "call"}, {"api_name": "obspy.taup", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 230, "usage_type": "call"}, {"api_name": "obspy.geodetics.kilometer2degrees", "line_number": 232, "usage_type": "call"}, {"api_name": "obspy.geodetics", "line_number": 232, "usage_type": "name"}, {"api_name": "phasepapy.tt_stations_1D.TTtable1D", "line_number": 243, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 243, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 243, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 255, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.BaseTT1D.metadata.create_all", "line_number": 256, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.BaseTT1D", "line_number": 256, "usage_type": "attribute"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 256, "usage_type": "name"}, {"api_name": "obspy.Inventory", "line_number": 259, "usage_type": "call"}, {"api_name": "glob.glob", 
"line_number": 260, "usage_type": "call"}, {"api_name": "obspy.read_inventory", "line_number": 264, "usage_type": "call"}, {"api_name": "re.match", "line_number": 272, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D.Station1D", "line_number": 279, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 279, "usage_type": "name"}, {"api_name": "obspy.taup.TauPyModel", "line_number": 285, "usage_type": "call"}, {"api_name": "obspy.taup", "line_number": 285, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 287, "usage_type": "call"}, {"api_name": "obspy.geodetics.kilometer2degrees", "line_number": 289, "usage_type": "call"}, {"api_name": "obspy.geodetics", "line_number": 289, "usage_type": "name"}, {"api_name": "phasepapy.tt_stations_1D.TTtable1D", "line_number": 300, "usage_type": "call"}, {"api_name": "phasepapy.tt_stations_1D", "line_number": 300, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 300, "usage_type": "call"}, {"api_name": "obspy.Stream", "line_number": 318, "usage_type": "call"}, {"api_name": "obspy.read", "line_number": 319, "usage_type": "call"}, {"api_name": "obspy.read", "line_number": 320, "usage_type": "call"}, {"api_name": "obspy.read", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 325, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 331, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 331, "usage_type": "name"}, {"api_name": "phasepapy.tables1D.Pick", "line_number": 333, "usage_type": "call"}, {"api_name": "phasepapy.tables1D", "line_number": 333, "usage_type": "name"}, {"api_name": "obspy.UTCDateTime", "line_number": 357, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 374, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 375, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 375, "usage_type": "name"}, {"api_name": "phasepapy.tables1D.Pick", "line_number": 376, "usage_type": "call"}, {"api_name": "phasepapy.tables1D", "line_number": 376, "usage_type": "name"}, {"api_name": "obspy.UTCDateTime", "line_number": 428, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 433, "usage_type": "call"}, {"api_name": "os.path", "line_number": 433, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 434, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 444, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base.metadata.create_all", "line_number": 445, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base", "line_number": 445, "usage_type": "attribute"}, {"api_name": "phasepapy.tables1D", "line_number": 445, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 448, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 464, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 477, "usage_type": "call"}, {"api_name": "obspy.Inventory", "line_number": 507, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 508, "usage_type": "call"}, {"api_name": "obspy.read_inventory", "line_number": 510, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.Client", "line_number": 513, "usage_type": "call"}, {"api_name": "os.system", "line_number": 517, "usage_type": "call"}, {"api_name": 
"phasepapy.fbpicker.FBPicker", "line_number": 520, "usage_type": "call"}, {"api_name": "phasepapy.fbpicker", "line_number": 520, "usage_type": "name"}, {"api_name": "obspy.UTCDateTime", "line_number": 538, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 547, "usage_type": "call"}, {"api_name": "os.path", "line_number": 547, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 548, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 556, "usage_type": "call"}, {"api_name": "os.path", "line_number": 556, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 558, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base.metadata.create_all", "line_number": 559, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base", "line_number": 559, "usage_type": "attribute"}, {"api_name": "phasepapy.tables1D", "line_number": 559, "usage_type": "name"}, {"api_name": "phasepapy.assoc1D.LocalAssociator", "line_number": 565, "usage_type": "call"}, {"api_name": "phasepapy.assoc1D", "line_number": 565, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 567, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 567, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 570, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 570, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 575, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 575, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 595, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 597, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 610, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 612, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 624, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 625, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 682, "usage_type": "call"}, {"api_name": "obspy.core.event.Catalog", "line_number": 683, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 687, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 688, "usage_type": "call"}, {"api_name": "obspy.core.event.Origin", "line_number": 690, "usage_type": "call"}, {"api_name": "obspy.core.event.Event", "line_number": 700, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 703, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 704, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 706, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 707, "usage_type": "call"}, {"api_name": "obspy.core.event.base.WaveformStreamID", "line_number": 711, "usage_type": "call"}, {"api_name": "obspy.core.event.Pick", "line_number": 712, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 720, "usage_type": "call"}, {"api_name": "obspy.core.event.Arrival", "line_number": 723, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 730, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 769, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 776, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 781, 
"usage_type": "name"}, {"api_name": "obspy.UTCDateTime", "line_number": 788, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 806, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 830, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 832, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 834, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 836, "usage_type": "call"}, {"api_name": "obspy.core.event.Catalog", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 906, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 908, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.Client", "line_number": 923, "usage_type": "call"}, {"api_name": "obspy.Stream", "line_number": 953, "usage_type": "call"}, {"api_name": "obspy.read", "line_number": 958, "usage_type": "call"}, {"api_name": "obspy.read", "line_number": 961, "usage_type": "call"}, {"api_name": "obspy.read", "line_number": 964, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 977, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 981, "usage_type": "call"}, {"api_name": "obspy.read_inventory", "line_number": 983, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 985, "usage_type": "call"}, {"api_name": "obspy.read_inventory", "line_number": 986, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 990, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 991, "usage_type": "call"}, {"api_name": "obspy.geodetics.gps2dist_azimuth", "line_number": 1015, "usage_type": "call"}, {"api_name": "obspy.geodetics.gps2dist_azimuth", "line_number": 1017, "usage_type": "call"}, {"api_name": "obspy.core.event.Amplitude", "line_number": 1024, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 1028, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 1041, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 1043, "usage_type": "call"}, {"api_name": "obspy.core.event.StationMagnitude", "line_number": 1050, "usage_type": "call"}, {"api_name": "obspy.core.event.ResourceIdentifier", "line_number": 1054, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 1082, "usage_type": "call"}, {"api_name": "obspy.core.event.Magnitude", "line_number": 1084, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1086, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 1106, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 1108, "usage_type": "call"}, {"api_name": "pylab.close", "line_number": 1113, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1125, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 1126, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 1132, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1143, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 1144, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 1146, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base.metadata.create_all", "line_number": 1147, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base", "line_number": 1147, "usage_type": "attribute"}, {"api_name": "phasepapy.tables1D", 
"line_number": 1147, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 1150, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 1161, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 1165, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 1166, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 1179, "usage_type": "call"}, {"api_name": "obspy.Inventory", "line_number": 1206, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 1207, "usage_type": "call"}, {"api_name": "obspy.read_inventory", "line_number": 1209, "usage_type": "call"}, {"api_name": "obspy.clients.fdsn.Client", "line_number": 1212, "usage_type": "call"}, {"api_name": "os.system", "line_number": 1216, "usage_type": "call"}, {"api_name": "phasepapy.fbpicker.FBPicker", "line_number": 1219, "usage_type": "call"}, {"api_name": "phasepapy.fbpicker", "line_number": 1219, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 1229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1229, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 1230, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1238, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1238, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 1240, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base.metadata.create_all", "line_number": 1241, "usage_type": "call"}, {"api_name": "phasepapy.tables1D.Base", "line_number": 1241, "usage_type": "attribute"}, {"api_name": "phasepapy.tables1D", "line_number": 1241, "usage_type": "name"}, {"api_name": "phasepapy.assoc1D.LocalAssociator", "line_number": 1247, "usage_type": "call"}, {"api_name": "phasepapy.assoc1D", "line_number": 1247, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1249, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1249, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1252, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1252, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1257, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1257, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 1297, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 1298, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1300, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1431, "usage_type": "call"}, {"api_name": "obspy.UTCDateTime", "line_number": 1437, "usage_type": "call"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 1450, "usage_type": "call"}, {"api_name": "pylab.colorbar", "line_number": 1467, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 1469, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 1473, "usage_type": "call"}, {"api_name": "pylab.savefig", "line_number": 1474, "usage_type": "call"}]} +{"seq_id": "161514518", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, with_statement\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\n\nimport seaborn as sns\n# import string\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn.metrics import confusion_matrix, auc, roc_curve, f1_score\nfrom 
sklearn import linear_model\nfrom sklearn.metrics import r2_score as r2\n\nfrom sklearn.preprocessing import QuantileTransformer, MinMaxScaler, RobustScaler, PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\n\n\n\ndef _initialise(self):\n # AUTHOR: Ford Cropley\n #\n # set default style for all plots\n # print(plt.style.available)\n plt.style.use('seaborn-whitegrid') # <<<<< CHOOSE A STYLE TO OVERWRITE if desired #######\n\n # NB ALL other sizes scale with DPI - if double DPI, then labels appear twice as big.\n # - try to keep the DPI constant\n # override some settings\n DPI = 300\n SMALL_FIGSIZE = (10, 7)\n FIGSIZE = (20, 14)\n TEXT_COLOUR = \"black\"\n TEXT_WEIGHT = \"normal\"\n TICK_LBL_SIZE = 16\n AXIS_LBL_SIZE = TICK_LBL_SIZE + 2\n TITLE_LBL_SIZE = TICK_LBL_SIZE + 4\n updates = { # remove black rings round edge of markers\n \"lines.markeredgecolor\": \"white\",\n \"lines.markeredgewidth\": 1,\n \"lines.linewidth\": 1, # line width in points\n # axes\n \"axes.linewidth\": 1, # edge linewidth\n \"axes.grid\": True, # display grid or not\n \"axes.grid.axis\": \"both\", # which axis the grid should apply to\n \"axes.titlesize\": AXIS_LBL_SIZE, # fontsize of the axes title\n \"axes.titleweight\": TEXT_WEIGHT, # font weight of title\n \"axes.labelsize\": AXIS_LBL_SIZE, # fontsize of the x any y labels\n \"axes.labelweight\": TEXT_WEIGHT, # weight of the x and y labels\n \"axes.labelcolor\": TEXT_COLOUR,\n # x axis labels\n \"xtick.major.size\": 3.5, # major tick size in points\n \"xtick.minor.size\": 2, # minor tick size in points\n \"xtick.major.width\": 0.8, # major tick width in points\n \"xtick.minor.width\": 0.6, # minor tick width in points\n \"xtick.color\": TEXT_COLOUR, # color of the tick labels\n \"xtick.labelsize\": TICK_LBL_SIZE, # fontsize of the tick labels\n # y axis labels\n \"ytick.major.size\": 3.5,\n \"ytick.minor.size\": 2,\n \"ytick.major.width\": 0.8,\n \"ytick.minor.width\": 0.6,\n \"ytick.color\": TEXT_COLOUR,\n \"ytick.labelsize\": TICK_LBL_SIZE,\n # grid\n \"grid.color\": \"0b0b0b\", # grid color\n \"grid.linestyle\": \"--\", # solid\n \"grid.linewidth\": 1.0, # in points\n \"grid.alpha\": 0.5, # transparency, between 0.0 and 1.0\n # legend\n \"legend.fontsize\": TICK_LBL_SIZE,\n \"legend.frameon\": True, # if True, draw the legend on a background patch\n \"legend.edgecolor\": \"0b0b0b\", # background patch boundary color\n \"legend.fancybox\": True, # if True, put rounded box around the legend, else rectangle\n \"legend.numpoints\": 3, # the number of marker points in the legend line\n \"legend.scatterpoints\": 3, # number of scatter points\n \"legend.markerscale\": 1.5, # relative size of legend markers vs. 
original\n # font family\n 'font.family': ['Arial'],\n # figure\n \"figure.titlesize\": TITLE_LBL_SIZE, # size of the figure title (Figure.suptitle())\n \"figure.titleweight\": TEXT_WEIGHT, # weight of the figure title\n \"figure.dpi\": DPI, # figure dots per inch\n \"figure.figsize\": SMALL_FIGSIZE}\n plt.rcParams.update(updates)\n # LATEX\n # - only turn on for smart graphs - too much trouble for general use (eg saving PNG files)\n if self.LATEX_ENABLED:\n plt.rcParams.update({\"text.usetex\": True}) # use inline math for ticks\n\n\ndef plot_comp_all_vars(da, vars_comp, start=None, end=None, qq=(0.0, 1.0), sec=None, ylabs=None,\n legend_labs=None, bars=None, cmap=None,\n ylims=None, mask_date=None, vline=None, vspan=None, file_name=None, figsize=(30, 23),\n alpha=1.0, fontsize=16, interplotspace=(None, None), comp_in_subplot=False, cmp_colors=None,\n reverse=(), k_ticks=None, style=None, grid_plot=True, marker_size=4, date_format=None):\n sns.set(font_scale=1.3)\n sns.set_style(\"ticks\")\n if start is None:\n start = da.index[0]\n if end is None:\n end = da.index[-1]\n\n if file_name is None:\n save = False\n else:\n save = True\n\n if sec is None:\n keys = None\n else:\n keys = list(sec.keys())\n\n if bars is None:\n bars = ['']\n else:\n bars_dict = bars\n bars = list(bars_dict.keys())\n\n if style is None:\n style = '.'\n\n if date_format is None:\n date_format = 'W'\n\n da = da.loc[start:end, :]\n d = da.copy()\n if k_ticks is not None:\n t_keys = list(k_ticks.keys())\n for i in t_keys:\n d.loc[:, i] = d.loc[:, i] / k_ticks[i]\n dqq = d.quantile(q=qq, axis=0)\n if cmap is None:\n cmap = 'Set2'\n n = len(ylabs)\n colors = plt.cm.get_cmap(cmap)(np.linspace(0, 1, n + 1))\n if comp_in_subplot:\n if cmp_colors is None:\n c = len(max(vars_comp))\n cmp_colors = plt.cm.get_cmap(cmap)(np.linspace(0, 1, c))\n else:\n pass\n\n with plt.style.context('seaborn-whitegrid'):\n fig, ax = plt.subplots(nrows=n, sharex=True, figsize=figsize, squeeze=False)\n for i in range(0, n):\n if vars_comp[i][0] in bars:\n ax[i, 0] = d.loc[:, vars_comp[i]].plot(ax=ax[i, 0],\n style=style,\n grid=grid_plot,\n rot=0, ms=marker_size, alpha=alpha, x_compat=True)\n binary_ix = bars_dict[vars_comp[i][0]]\n d_line = d.loc[:, binary_ix]\n d_bin = d[d.loc[:, binary_ix] == 1].dropna(how='all')\n dd_bin = d.loc[d_bin.index, vars_comp[i]]\n ax[i, 0] = dd_bin.plot(style='x', ax=ax[i, 0], ms=marker_size)\n else:\n if keys is not None:\n if vars_comp[i][0] in keys: # Secondary axes\n ax[i, 0] = d.loc[:, vars_comp[i][0]].plot(ax=ax[i, 0],\n style=style, grid=grid_plot,\n rot=0, ms=marker_size, alpha=alpha, x_compat=True)\n for j in range(1, len(sec[keys[0]])):\n ax[i, 0] = d.loc[:, vars_comp[i][j]].plot(ax=ax[i, 0],\n secondary_y=True,\n style=style, grid=grid_plot,\n rot=0, ms=marker_size, alpha=alpha, x_compat=True)\n else:\n ax[i, 0] = d.loc[:, vars_comp[i]].plot(ax=ax[i, 0],\n style=style,\n grid=grid_plot,\n rot=0, ms=marker_size, alpha=alpha, x_compat=True)\n\n if comp_in_subplot:\n for k, color in enumerate(cmp_colors):\n ax[i, 0].lines[k].set_color(color)\n elif vars_comp[i][0] in bars:\n if i == 0:\n ax[i, 0].lines[1].set_color('b')\n ax[i, 0].lines[0].set_color('r')\n else:\n ax[i, 0].lines[1].set_color('gray')\n ax[i, 0].lines[0].set_color(colors[i])\n else:\n if i == 0:\n ax[i, 0].lines[0].set_color('r')\n else:\n if i != 1:\n ax[i, 0].lines[0].set_color(colors[i])\n\n if legend_labs is not None:\n ax[i, 0].legend(legend_labs[i], markerscale=3, prop={'size': fontsize},\n loc='center left', bbox_to_anchor=(1, 0.5))\n 
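_initialise centralises figure styling: it starts from a named matplotlib style and then layers a large override dict on top with plt.rcParams.update, so every later figure inherits the same fonts, grids, and legend behaviour. A trimmed, self-contained sketch of that layering (the style name and values here are arbitrary picks, not the module's full set):

# Sketch: base style + rcParams overrides, the same layering _initialise performs.
import matplotlib.pyplot as plt

plt.style.use("seaborn-v0_8-whitegrid")   # newer matplotlib renames 'seaborn-whitegrid' to this
plt.rcParams.update({
    "figure.dpi": 150,                    # everything else scales with DPI, as the comments note
    "axes.grid": True,
    "grid.linestyle": "--",
    "grid.alpha": 0.5,
    "legend.frameon": True,
    "lines.markeredgecolor": "white",     # removes the dark rings around markers
})

fig, ax = plt.subplots(figsize=(6, 4))
ax.plot([0, 1, 2], [0, 1, 0], marker="o")
fig.savefig("styled.png", bbox_inches="tight")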
else:\n ax[i, 0].legend(markerscale=3, prop={'size': fontsize},\n loc='center left', bbox_to_anchor=(1, 0.5))\n\n if ylims is not None:\n if i in ylims.keys():\n a, b = ylims[i]\n else:\n a = dqq.loc[:, vars_comp[i]].values.min()\n b = dqq.loc[:, vars_comp[i]].values.max()\n\n else:\n a = dqq.loc[:, vars_comp[i]].values.min()\n b = dqq.loc[:, vars_comp[i]].values.max()\n\n ax[i, 0].set_ylim(a, b)\n\n ax[i, 0].set_xlabel('')\n ax[i, 0].set_ylabel(ylabs[i], fontdict={'size': fontsize})\n ax[i, 0].yaxis.set_major_locator(plt.MaxNLocator(5), )\n ax[i, 0].ticklabel_format(axis='y', style='sci', scilimits=(-3, 3), useMathText=True)\n ax[i, 0].yaxis.set_tick_params(labelsize=fontsize)\n ax[i, 0].xaxis.set_tick_params(labelsize=fontsize, rotation=0)\n\n if date_format is 'W':\n locator = mdates.WeekdayLocator(byweekday=0)\n minlocator = mdates.DayLocator()\n elif date_format is 'D':\n locator = mdates.DayLocator()\n minlocator = mdates.HourLocator()\n else:\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n minlocator = mdates.AutoDateLocator(minticks=5, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', '%b', '%d-%b', '%H:%M:%S', '%H:%M:%S', '%S.%f']\n formatter.zero_formats = ['', '%y', '%d-%b', '%d-%b', '%H:%M:%S', '%H:%M:%S']\n formatter.offset_formats = ['', '', '', '%b %Y', '%d %b %Y', '%d %b %Y']\n\n ax[i, 0].xaxis.set_major_locator(locator)\n ax[i, 0].xaxis.set_major_formatter(formatter)\n\n ax[i, 0].xaxis.set_minor_locator(minlocator)\n ax[i, 0].tick_params(which='minor', length=4, color='k')\n ax[i, 0].tick_params(which='major', length=8, color='k', pad=10)\n\n ax[i, 0].spines['left'].set_linewidth(2)\n ax[i, 0].spines['left'].set_color('gray')\n ax[i, 0].spines['bottom'].set_linewidth(2)\n ax[i, 0].spines['bottom'].set_color('gray')\n\n if len(reverse) != 0:\n if reverse[i] in vars_comp[i]:\n ax[i, 0].invert_yaxis()\n\n if vline is not None:\n for l, k, a in vline:\n if i == a:\n ax[i, 0].axvline(x=l, color=k, linestyle='-')\n if vspan is not None:\n for v, k, a in vspan:\n if i == a:\n ax[i, 0].axvspan(v[0], v[1], facecolor=k, alpha=0.15)\n\n if i != n - 1:\n ax[i, 0].set_xticklabels('')\n\n plt.xticks(ha='center')\n\n fig.align_ylabels(ax[:, 0])\n plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=interplotspace[0], hspace=interplotspace[1])\n if save:\n fig.savefig(file_name, bbox_inches='tight', pad_inches=0.1, dpi=300)\n\n\ndef norm(x, type_norm=1, stats=None):\n # assert isinstance(x, pd.DataFrame), \"[ERROR]: X must be a pandas DataFrame.\"\n if not isinstance(stats, pd.DataFrame):\n stats = x.describe().transpose()\n\n if type_norm == 1:\n return (x - stats['mean']) / stats['std']\n elif type_norm == 2:\n return 2 * (x - stats['min']) / (stats['max'] - stats['min']) - 1\n else:\n return x\n\n\ndef msd_hourly(y_true, y_pred):\n yy_true = [np.mean(y_true[i:i+60]) for i in range(0, y_true.shape[0], 60)]\n yy_pred = [np.mean(y_pred[i:i+60]) for i in range(0, y_pred.shape[0], 60)]\n error = mse(yy_true, yy_pred)\n return error\n\n\n# Save dataset\ndef save_dataset(name, df, var_names):\n data = {}\n df['Time'] = df.index\n var_names += ['Time']\n for x in var_names:\n data[x] = df.loc[:, x].values\n\n np.save(name + '.npy', data, allow_pickle=True, fix_imports=True)\n return None\n\n\ndef plot_ts_residuals3(df_data, ytrain_true, ytrain_model, ytest_true, ytest_model, start=None, end=None,\n style='.', size=(30, 23), fontsize=14, axs=None, ms=3, date_format='W', legend=True, metric=None):\n 
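Two helpers defined just above deserve a worked example: norm standardises against a precomputed stats table (z-score for type_norm=1, [-1, 1] min-max for type_norm=2), and msd_hourly averages each run of 60 samples before computing the MSE, i.e. it scores hourly means rather than raw samples (the 60-sample stride implies a one-minute cadence). A sketch of the block-averaging under that assumed cadence:

# Sketch: msd_hourly collapses 60 consecutive (assumed one-minute) samples to hourly means.
import numpy as np
from sklearn.metrics import mean_squared_error as mse

rng = np.random.default_rng(0)
y_true = rng.normal(2.0, 0.1, size=600)              # ten "hours" of minute data
y_pred = y_true + rng.normal(0.0, 0.05, size=600)

hourly_true = y_true.reshape(-1, 60).mean(axis=1)    # equivalent to the list comprehensions
hourly_pred = y_pred.reshape(-1, 60).mean(axis=1)    # (the loop also tolerates a ragged last block)
print(mse(hourly_true, hourly_pred))                 # the "MSD" quoted in the plot annotations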
sns.set(font_scale=1.3)\n sns.set_style(\"ticks\")\n val_ix = [ytest_true.index[0], ytest_true.index[-1]]\n start = df_data.index[0] if start is None else start\n end = df_data.index[-1] if end is None else end\n metric = 0 if metric is None else metric\n df_data = df_data.loc[start:end, :]\n if legend:\n leg = [['Reference', 'Model']]\n if metric == 0: # MSD\n a = msd_hourly(ytrain_true.values, ytrain_model)\n b = msd_hourly(ytest_true.values, ytest_model)\n text_rmse_train = r'$\\mathrm{MSD_{TRAIN}: %.3E }$' % (a,)\n text_rmse_test = r'$\\mathrm{MSD_{TEST}: %.3E }$' % (b,)\n else:\n a = msd_hourly(ytrain_true.values, ytrain_model)\n b = msd_hourly(ytest_true.values, ytest_model)\n text_rmse_train = r'$\\mathrm{RMSE_{TRAIN}}$'+': {:1.5f} [ppm]'.format(np.sqrt(a))\n text_rmse_test = r'$\\mathrm{RMSE_{TEST}}$'+': {:1.5f} [ppm]'.format(np.sqrt(b))\n props = dict(boxstyle='round', alpha=0.5)\n colors = plt.cm.get_cmap('Set2')\n\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', '%b', # ticks are mostly months\n '%d', # ticks are mostly days\n '%H:%M', # hrs\n '%H:%M', # min\n '%S.%f', ] # secs\n formatter.zero_formats = [''] + formatter.formats[:-1]\n formatter.zero_formats[2] = '%d-%b'\n formatter.offset_formats = ['',\n '%Y',\n '%Y',\n '%d %b %Y',\n '%d %b %Y',\n '%d %b %Y %H:%M', ]\n with plt.style.context('seaborn-whitegrid'):\n if axs is not None:\n ax1 = axs[0]\n else:\n fig, ax1 = plt.subplots(nrows=1, sharex=True, figsize=size, squeeze=False)\n\n ax1 = df_data.loc[:, ['REF', 'Model']].plot(ax=ax1, style=style, cmap=colors, grid=True, rot=0, ms=ms)\n ax1.lines[0].set_color('r')\n ax1.lines[1].set_color('b')\n if legend:\n ax1.legend(leg[0], markerscale=5, prop={'size': fontsize}, loc='center left', bbox_to_anchor=(1, 0.5))\n else:\n ax1.legend('')\n ax1.set_xlabel('')\n ax1.set_ylabel(\"$\\mathrm{CH_{4}}$ (ppm)\", fontdict={'size': fontsize})\n ax1.axvspan(val_ix[0], val_ix[1], facecolor='blue', alpha=0.15)\n ax1.spines['left'].set_linewidth(2)\n ax1.spines['left'].set_color('gray')\n ax1.spines['bottom'].set_linewidth(2)\n ax1.spines['bottom'].set_color('gray')\n\n if date_format is 'W':\n locator = mdates.WeekdayLocator(byweekday=0)\n elif date_format is 'D':\n locator = mdates.DayLocator()\n else:\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', '%b', '%d-%b', '%H:%M', '%H:%M', '%S.%f']\n formatter.zero_formats = ['', '%y', '%d-%b', '%d-%b', '%H:%M', '%H:%M']\n formatter.offset_formats = ['', '', '', '%d %b %Y', '%d %b %Y', '%d %b %Y %H:%M']\n\n ax1.xaxis.set_major_locator(locator)\n ax1.xaxis.set_major_formatter(formatter)\n ax1.xaxis.set_minor_locator(mdates.DayLocator())\n ax1.tick_params(which='minor', length=4, color='k')\n ax1.tick_params(which='major', length=8, color='k', pad=10)\n\n ax1.tick_params(which='minor', length=4, color='k')\n plt.xticks(ha='center')\n\n ax1.text(0.01, 0.98, text_rmse_train, transform=ax1.transAxes, fontsize=15, verticalalignment='top', bbox=props)\n ax1.text(0.25, 0.98, text_rmse_test , transform=ax1.transAxes, fontsize=15, verticalalignment='top', bbox=props)\n\n if axs is None:\n fig.align_ylabels(ax1)\n\n if axs is not None:\n return ax1\n\n\ndef plot_ts_residuals4(df_data, ytrain_true, ytrain_model, ytest_true, ytest_model, start=None, end=None,\n style='.', size=(30, 23), fontsize=14, ms=3, file_name=None):\n if file_name is None:\n save = False\n else:\n save = 
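The locator/formatter block repeated across these plotters is worth isolating. One caution: the source compares strings with `is` (`date_format is 'W'`), which only works by CPython string interning and should be `==`. A compact sketch of the intended weekly date axis:

# Sketch: weekly major ticks with ConciseDateFormatter; strings compared with ==, not `is`.
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd

idx = pd.date_range("2020-01-01", periods=21 * 24, freq="h")     # three weeks, hourly
ser = pd.Series(range(len(idx)), index=idx)

fig, ax = plt.subplots()
ser.plot(ax=ax, x_compat=True)     # x_compat keeps native matplotlib date units, as the module does

date_format = "W"
if date_format == "W":
    locator = mdates.WeekdayLocator(byweekday=0)                 # a tick on every Monday
else:
    locator = mdates.AutoDateLocator(minticks=7, maxticks=10)
formatter = mdates.ConciseDateFormatter(locator)                 # short labels + one offset string
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_minor_locator(mdates.DayLocator())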
True\n\n sns.set(font_scale=1.3)\n sns.set_style(\"ticks\")\n val_ix = [ytest_true.index[0], ytest_true.index[-1]]\n start = df_data.index[0] if start is None else start\n end = df_data.index[-1] if end is None else end\n df_data = df_data.loc[start:end, :]\n leg = [['Reference', 'Model']]\n\n a = msd_hourly(ytrain_true.values, ytrain_model)\n b = msd_hourly(ytest_true.values, ytest_model)\n\n text_rmse_train = '$\\mathrm{MSD_{TRAIN}}$: '+'{:4f}'.format(a)\n text_rmse_test = '$\\mathrm{MSD_{TEST }}$: '+'{:4f}'.format(b)\n props = dict(boxstyle='round', alpha=0.5)\n colors = plt.cm.get_cmap('Set2')\n\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', # ticks are mostly years\n '%b', # ticks are mostly months\n '%d', # ticks are mostly days\n '%H:%M', # hrs\n '%H:%M', # min\n '%S.%f', ] # secs\n formatter.zero_formats = [''] + formatter.formats[:-1]\n formatter.zero_formats[2] = '%d-%b'\n formatter.offset_formats = ['',\n '%Y',\n '%Y',\n '%d %b %Y',\n '%d %b %Y',\n '%d %b %Y %H:%M', ]\n with plt.style.context('seaborn-whitegrid'):\n fig, ax1 = plt.subplots(nrows=1, figsize=size, squeeze=True)\n ax1 = df_data.loc[:, ['REF', 'Model']].plot(ax=ax1, style=style, cmap=colors, grid=True, rot=0, ms=ms)\n ax1.lines[0].set_color('r')\n ax1.lines[1].set_color('b')\n ax1.legend(leg[0], markerscale=5, prop={'size': fontsize}, loc='center left', bbox_to_anchor=(1, 0.5))\n ax1.set_xlabel('')\n ax1.set_ylabel(\"$\\mathrm{CH_{4}}$ [ppm]\", fontdict={'size': fontsize})\n ax1.axvspan(val_ix[0], val_ix[1], facecolor='blue', alpha=0.15)\n ax1.xaxis.set_major_locator(locator)\n ax1.xaxis.set_major_formatter(formatter)\n ax1.spines['left'].set_linewidth(2)\n ax1.spines['left'].set_color('gray')\n ax1.spines['bottom'].set_linewidth(2)\n ax1.spines['bottom'].set_color('gray')\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', '%b', '%d', '%H:%M', '%H:%M', '%S.%f', ]\n formatter.zero_formats = [''] + formatter.formats[:-1]\n formatter.zero_formats[2] = '%d-%b'\n formatter.offset_formats = ['', '', '', '', '%b %Y', '%d %b %Y %H:%M', ]\n\n ax1.xaxis.set_major_locator(locator)\n ax1.xaxis.set_major_formatter(formatter)\n ax1.xaxis.set_minor_locator(mdates.DayLocator())\n ax1.tick_params(which='minor', length=4, color='k')\n plt.xticks(ha='center')\n\n ax1.text(0.01, 0.98, text_rmse_train, transform=ax1.transAxes, fontsize=15, verticalalignment='top', bbox=props)\n ax1.text(0.25, 0.98, text_rmse_test , transform=ax1.transAxes, fontsize=15, verticalalignment='top', bbox=props)\n\n if save:\n fig.savefig(file_name, bbox_inches='tight', pad_inches=0.1, dpi=300)\n\n\ndef plot_response(d, xvars, yvars, xlabl, ylabl, ylablsctr, lgn_lab, figsize, fontsize=16, file_name=None, latex=False,\n marker_size=8, degree=1,eq=False):\n if file_name is None:\n save = False\n else:\n save = True\n n = len(xvars)\n n2 = len(ylablsctr)\n\n def make_reg(ds, x_var, y_var, deg):\n dd = ds.loc[:, [x_var] + [y_var]]\n dd.dropna(inplace=True)\n if deg > 1:\n model = make_pipeline(RobustScaler(quantile_range=(1.0, 99.0)),\n PolynomialFeatures(deg),\n linear_model.LinearRegression()\n )\n else:\n model = make_pipeline(RobustScaler(quantile_range=(1.0, 99.0)),\n linear_model.LinearRegression()\n )\n model.fit(dd.loc[:, x_var].values.reshape(len(dd), 1), dd.loc[:, y_var].values.reshape(len(dd), 1))\n # reg = linear_model.LinearRegression()\n # reg.fit(dd.loc[:, 
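plot_ts_residuals3/4 pin their RMSE/MSD read-outs with ax.text(..., transform=ax.transAxes, bbox=props), which anchors the box in axes-fraction coordinates so it stays put regardless of the data limits. A minimal sketch; the metric value shown is a placeholder:

# Sketch: a metric box pinned to the axes corner via transAxes coordinates.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [1.9, 2.1, 2.0])
props = dict(boxstyle="round", alpha=0.5)            # same bbox style the module uses
ax.text(0.01, 0.98, r"$\mathrm{RMSE_{TRAIN}}$: 0.042 (ppm)",   # placeholder number
        transform=ax.transAxes,                      # (0, 0) = bottom-left, (1, 1) = top-right
        fontsize=12, verticalalignment="top", bbox=props)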
x_var].values.reshape(len(dd), 1), dd.loc[:, y_var].values.reshape(len(dd), 1))\n # dd['y_pred'] = reg.predict(dd.loc[:, x_var].values.reshape(len(dd), 1))\n dd['y_pred'] = model.predict(dd.loc[:, x_var].values.reshape(len(dd), 1))\n # if deg > 1:\n m_slope = model.named_steps['linearregression'].coef_[0]\n # else:\n # m_slope = model.named_steps['linearregression'].coef_[0][0]\n r_2_score = r2(dd.loc[:, y_var].values.reshape(len(dd), 1), dd.loc[:, 'y_pred'].values.reshape(len(dd), 1))\n num_obs = len(dd)\n return dd, m_slope, r_2_score, num_obs\n\n def format_axs(ax_f, ylabs, xticks, reverse, fontsize, locator=(3, 1)):\n ax_f.set_xlabel('')\n ax_f.set_ylabel(ylabs, fontdict={'size': fontsize})\n if locator is None:\n ax_f.yaxis.set_major_locator(plt.AutoLocator())\n ax_f.yaxis.set_minor_locator(plt.AutoLocator())\n else:\n ax_f.yaxis.set_major_locator(plt.MultipleLocator(locator[0]))\n ax_f.yaxis.set_minor_locator(plt.MultipleLocator(locator[1]))\n ax_f.yaxis.set_tick_params(labelsize=fontsize)\n ax_f.xaxis.set_tick_params(labelsize=fontsize)\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', '%b', '%d', '%H:%M', '%H:%M', '%S.%f']\n formatter.zero_formats = [''] + formatter.formats[:-1]\n formatter.zero_formats[2] = '%d-%b'\n formatter.offset_formats = ['', '', '', '%d %b %Y', '%d %b %Y', '%d %b %Y %H:%M']\n\n ax_f.xaxis.set_major_locator(locator)\n ax_f.xaxis.set_major_formatter(formatter)\n ax_f.xaxis.set_minor_locator(mdates.DayLocator())\n ax_f.tick_params(which='minor', length=4, color='k')\n ax_f.spines['left'].set_linewidth(2)\n ax_f.spines['left'].set_color('gray')\n ax_f.spines['bottom'].set_linewidth(2)\n ax_f.spines['bottom'].set_color('gray')\n ax_f.spines['right'].set_linewidth(0.5)\n ax_f.spines['right'].set_color('gray')\n ax_f.spines['top'].set_linewidth(0.5)\n ax_f.spines['top'].set_color('gray')\n\n if reverse:\n ax_f.invert_yaxis()\n\n if not xticks:\n ax_f.set_xticklabels('')\n return ax_f\n\n with plt.style.context('seaborn-whitegrid'):\n sns.set(font_scale=1.3)\n sns.set_style(\"ticks\")\n\n x = int(np.ceil(np.sqrt(n)))\n y = int(np.ceil(n/x))\n\n fig, ax = plt.subplots(nrows=y+2, ncols=x, sharey=True, figsize=figsize, squeeze=False)\n gs0 = ax[0, 0].get_gridspec()\n gs1 = ax[1, 0].get_gridspec()\n\n for a in ax[0, :]:\n a.remove()\n\n for a in ax[1, :]:\n a.remove()\n\n ax0 = fig.add_subplot(gs0[0, :])\n ax1 = fig.add_subplot(gs1[1, :])\n\n ax0 = d.loc[:, yvars[0]].plot(ax=ax0, style='.', grid=True, rot=0, ms=marker_size, x_compat=True)\n ax0.set_ylabel(ylabl[0], fontdict={'size': fontsize})\n ax0.lines[0].set_color('r')\n ax0.legend([lgn_lab[0]], markerscale=3, prop={'size': fontsize}, loc='center left', bbox_to_anchor=(1, 0.5))\n ax0 = format_axs(ax0, ylabs=ylablsctr[0], xticks=False, reverse=False, fontsize=fontsize,\n locator=None #(3, 1)\n )\n\n da = d.loc[:, xvars]\n ax1 = da.plot(ax=ax1, style='.', grid=True, rot=0, ms=marker_size, x_compat=True)\n cmp_colors = plt.cm.get_cmap('Set2')(np.linspace(0, 1, n))\n\n for k, color in enumerate(cmp_colors):\n ax1.lines[k].set_color(color)\n\n ax1.legend([lgn_lab[1]], markerscale=3, prop={'size': fontsize}, loc='center left', bbox_to_anchor=(1, 0.5))\n ax1 = format_axs(ax1, ylabs=ylablsctr[1], xticks=True, reverse=False, fontsize=fontsize,\n locator=None)\n count = 0\n\n for i in range(2, y+n2):\n for j in range(0, x):\n if count < n:\n dd_r, m, r_2, n_r = make_reg(d, xvars[count], yvars[count], degree)\n ax[i, j] = 
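make_reg fits through make_pipeline(RobustScaler(quantile_range=(1.0, 99.0)), PolynomialFeatures(deg), LinearRegression()) so extreme points barely influence the scaling, then reads the coefficients back out of named_steps. One wrinkle: with PolynomialFeatures the fitted coef_ row includes the bias column, which is why the degree-1 and higher-degree slope handling diverged in the commented-out variants. A standalone sketch with invented data:

# Sketch: robust-scaled polynomial regression mirroring make_reg (data and degree invented).
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, RobustScaler

rng = np.random.default_rng(1)
x = rng.uniform(0, 10, size=(200, 1))
y = 0.5 * x**2 - 2.0 * x + 1.0 + rng.normal(0.0, 1.0, size=(200, 1))

model = make_pipeline(RobustScaler(quantile_range=(1.0, 99.0)),   # same clipping range as make_reg
                      PolynomialFeatures(2),
                      LinearRegression())
model.fit(x, y)
coefs = model.named_steps["linearregression"].coef_[0]  # bias column first, then the x and x^2 terms
print("R2:", r2_score(y, model.predict(x)), "coefficients:", coefs)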
dd_r.plot(ax=ax[i, j], grid=True, style='.', ms=marker_size, x=xvars[count], y=yvars[count], x_compat=True)\n #x = dd_r.loc[:, xvars[count]].values\n #y = dd_r.loc[:, yvars[count]].values\n #z = np.polyfit(x, y, degree)\n #p = np.poly1d(z)\n #ax[i, j].plot(x, p(x))\n ax[i, j] = dd_r.plot(ax=ax[i, j], grid=True, style='.', ms=marker_size, x=xvars[count], y='y_pred', x_compat=True)\n ax[i, j].lines[0].set_color('b')\n ax[i, j].lines[1].set_color('r')\n # ax[i, j].lines[2].set_color('k')\n ax[i, j].lines[0].set_label('')\n # if degree > 1:\n c = len(m)-1\n mm = ''\n while c > 0:\n if c == 1:\n mm += str(round(m[c], 4)) + ' $x$ +'\n else:\n mm += str(round(m[c], 4)) + f' $x^{c}$ +'\n c -= 1\n mm += str(m[0])\n if eq:\n ax[i, j].lines[1].set_label(mm + '\\n $\\mathrm{R^{2}}$: ' + '{:1.3f}'.format(r_2) + '\\n # obs: {:d}'.format(n_r))\n else:\n ax[i, j].lines[1].set_label('$\\mathrm{R^{2}}$: ' + '{:1.3f}'.format(r_2) + '\\n # obs: {:d}'.format(n_r))\n # else:\n # ax[i, j].lines[1].set_label('$m$: {:3.3f}'.format(m) + '\\n $\\mathrm{R^{2}}$: ' + '{:1.3f}'.format(r_2) + '\\n # obs: {:d}'.format(n_r))\n ax[i, j].legend(markerscale=3, prop={'size': fontsize}, loc='best', frameon=True, fancybox=True)\n ax[i, j].set_xlabel(xlabl[count], fontdict={'size': fontsize})\n ax[i, j].set_ylabel(ylabl[count], fontdict={'size': fontsize})\n ax[i, j].yaxis.set_tick_params(labelsize=fontsize)\n ax[i, j].xaxis.set_tick_params(labelsize=fontsize)\n ax[i, j].yaxis.set_major_locator(plt.AutoLocator())\n ax[i, j].xaxis.set_major_locator(plt.AutoLocator())\n ax[i, j].yaxis.set_minor_locator(plt.AutoLocator())\n ax[i, j].xaxis.set_minor_locator(plt.AutoLocator())\n ax[i, j].spines['left'].set_linewidth(2)\n ax[i, j].spines['left'].set_color('gray')\n ax[i, j].spines['bottom'].set_linewidth(2)\n ax[i, j].spines['bottom'].set_color('gray')\n ax[i, j].spines['right'].set_linewidth(0.5)\n ax[i, j].spines['right'].set_color('gray')\n ax[i, j].spines['top'].set_linewidth(0.5)\n ax[i, j].spines['top'].set_color('gray')\n if latex:\n print('& ' + mm + ' {:1.3f} & {:d}'.format(r_2, n_r))\n #print('& {:3.3f} & {:1.3f} & {:d}'.format(m, r_2, n_r))\n else:\n print('{} \\t Slope: {} \\t R2: {:1.3f} \\t # obs: {:d}'.format(xvars[count], mm, r_2, n_r))\n\n else:\n ax[i, j].axis('off')\n count += 1\n\n plt.xticks(ha='center')\n fig.align_ylabels([ax0, ax1, ax[2, 0]])\n if save:\n fig.savefig(file_name, bbox_inches='tight', pad_inches=0.1, dpi=300)\n\n\ndef set_ax_conf(ax, leg, ylabl, fontsize=14, loc='W'):\n if leg is not None:\n ax.legend(leg, markerscale=5, prop={'size': fontsize})\n ax.set_xlabel('')\n ax.set_ylabel(ylabl, fontdict={'size': fontsize})\n ax.spines['left'].set_linewidth(2)\n ax.spines['left'].set_color('gray')\n ax.spines['bottom'].set_linewidth(2)\n ax.spines['bottom'].set_color('gray')\n if loc is 'W':\n locator = mdates.WeekdayLocator(byweekday=0)\n elif loc is 'D':\n locator = mdates.DayLocator()\n else:\n locator = mdates.AutoDateLocator(minticks=7, maxticks=10)\n formatter = mdates.ConciseDateFormatter(locator)\n formatter.formats = ['%y', '%b', '%d-%b', '%H:%M:%S', '%H:%M:%S', '%S.%f']\n formatter.zero_formats = ['', '%y', '%d-%b', '%d-%b', '%H:%M:%S', '%H:%M:%S']\n formatter.offset_formats = ['', '', '', '%b %Y', '%d %b %Y', '%d %b %Y']\n\n ax.xaxis.set_major_locator(locator)\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_major_locator(locator)\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_minor_locator(mdates.DayLocator())\n ax.tick_params(which='minor', length=4, color='k')\n return 
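Recoloring pandas-plotted lines by indexing ax.lines against colors sampled from a named colormap, as in plt.cm.get_cmap('Set2')(np.linspace(0, 1, n)), is another house pattern here. Recent matplotlib prefers the colormaps registry over the deprecated cm.get_cmap; a sketch in that spelling:

# Sketch: sample N evenly spaced colors from a colormap and recolor each plotted line.
import numpy as np
import pandas as pd
from matplotlib import colormaps

df = pd.DataFrame(np.random.default_rng(2).normal(size=(50, 3)), columns=list("abc"))
colors = colormaps["Set2"](np.linspace(0, 1, df.shape[1]))   # registry lookup, replaces cm.get_cmap

ax = df.plot(style=".")
for k, color in enumerate(colors):
    ax.lines[k].set_color(color)       # override pandas' default color cycle, line by line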
ax\n\n\ndef plot_model_results(data_train, data_test, dates_sample=None, style='.', loc='W', file_name=None):\n\n if file_name is None:\n save = False\n else:\n save = True\n\n if dates_sample is not None:\n if 'train' in list(dates_sample.keys()):\n tr_start, tr_end = dates_sample['train']\n d0 = data_train.loc[tr_start:tr_end, :]\n else:\n d0 = data_train\n if 'test' in list(dates_sample.keys()):\n te_start, te_end = dates_sample['test']\n d1 = data_test.loc[te_start:te_end, :]\n else:\n d1 = data_test\n else:\n d0 = data_train\n d1 = data_test\n\n a = np.sqrt(mse(data_train.loc[:, 'Reference'].values, data_train.loc[:, 'Model'].values))\n b = np.sqrt(mse(data_test.loc[:, 'Reference'].values, data_test.loc[:, 'Model'].values))\n\n msd_train = '$\\mathrm{RMSE_{TRAIN}}$: ' + '{:4f} (ppm)'.format(a)\n msd_test = '$\\mathrm{RMSE_{TEST }}$: ' + '{:4f} (ppm)'.format(b)\n props = dict(boxstyle='round', alpha=0.5)\n\n with plt.style.context('seaborn-whitegrid'):\n fig, ax = plt.subplots(nrows=2, ncols=2, sharex=False, sharey=False, figsize=(20, 8), squeeze=False)\n gs0 = ax[0, 0].get_gridspec()\n gs1 = ax[1, 0].get_gridspec()\n for a in ax[:, 0]:\n a.remove()\n\n for a in ax[:, 1]:\n a.remove()\n\n ax0 = fig.add_subplot(gs0[0, :])\n ax1 = fig.add_subplot(gs1[1, :])\n ax0 = d0.plot(ax=ax0, style=style, grid=True, rot=0, ms=5, legend=False,x_compat=True)\n ax1 = d1.plot(ax=ax1, style=style, grid=True, rot=0, ms=5, legend=False,x_compat=True)\n # ax[0, 2] = data_train.plot.density(ax=ax[0, 2], legend=False)\n # ax[1, 2] = data_test.plot.density(ax=ax[1, 2], legend=False)\n\n ax0.lines[0].set_color('r')\n ax0.lines[1].set_color('b')\n ax1.lines[0].set_color('r')\n ax1.lines[1].set_color('b')\n\n ax0 = set_ax_conf(ax0, leg=None, ylabl=\"$\\mathrm{CH_{4}}$ (ppm)\", fontsize=14, loc=loc)\n ax1 = set_ax_conf(ax1, leg=None, ylabl=\"$\\mathrm{CH_{4}}$ (ppm)\", fontsize=14, loc=loc)\n\n ax0.text(0.01, 0.98, msd_train, transform=ax0.transAxes, fontsize=15, verticalalignment='top', bbox=props)\n ax1.text(0.01, 0.98, msd_test, transform=ax1.transAxes, fontsize=15, verticalalignment='top', bbox=props)\n\n handles, _ = ax0.get_legend_handles_labels()\n fig.legend(handles, ['Reference', 'Model'], loc='lower center', ncol=2, fontsize=14, markerscale=2.0)\n plt.xticks(ha='center')\n plt.subplots_adjust(left=None, bottom=0.15, right=None, top=None, wspace=0.2, hspace=0.2)\n\n if save:\n fig.savefig(file_name, bbox_inches='tight', pad_inches=0.1, dpi=300)\n\n\ndef plot_class_results(data, x_train, x_test, y_train, y_test, y_pred, xvar, yvar,\n xlabel='$\\mathrm{Resistance_{TGS\\ 2611-C00}}$ [$\\mathrm{K\\Omega}$]',\n ylabel='$\\mathrm{CH_{4}}$ [ppm]', file_name=None):\n if file_name is None:\n save = False\n else:\n save = True\n variables = xvar + yvar\n yy_pred = pd.DataFrame(y_pred, index=y_test.index, columns=['Binary'])\n with plt.style.context('seaborn-whitegrid'):\n fig, ax = plt.subplots(nrows=2, ncols=3, sharex=False, sharey=False, figsize=(20, 15), squeeze=False)\n gs0 = ax[0, 0].get_gridspec()\n for a in ax[0, :-1]:\n a.remove()\n ax0 = fig.add_subplot(gs0[0, :-1])\n\n A1 = yy_pred[(yy_pred[y_test == 0].dropna()) == 0].dropna()\n B1 = yy_pred[(yy_pred[y_test == 1].dropna()) == 1].dropna()\n C1 = pd.concat([A1, B1])\n A2 = yy_pred[(yy_pred[y_test == 0].dropna()) == 1].dropna()\n B2 = yy_pred[(yy_pred[y_test == 1].dropna()) == 0].dropna()\n C2 = pd.concat([A2, B2])\n class_0 = data.loc[C1.index, variables]\n class_1 = data.loc[C2.index, variables]\n ax0 = class_0.plot(x=xvar[0], y=yvar[0], style='.', 
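plot_model_results and plot_class_results both build mixed layouts the same way: create a regular grid with plt.subplots, remove one row of axes, and re-add a single axes spanning that row through the grid's shared gridspec. A minimal sketch of the merge:

# Sketch: merging a subplot row into one wide axes via the shared gridspec.
import matplotlib.pyplot as plt

fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(8, 5), squeeze=False)
gs = ax[0, 0].get_gridspec()       # every axes from plt.subplots shares this gridspec
for a in ax[0, :]:
    a.remove()                     # drop the two separate top-row axes
top = fig.add_subplot(gs[0, :])    # one axes spanning the whole top row
top.plot([0, 1], [0, 1])
ax[1, 0].plot([0, 1], [1, 0])      # the bottom row keeps its individual panels
ax[1, 1].plot([0, 1], [0, 0])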
ax=ax0)\n ax0 = class_1.plot(x=xvar[0], y=yvar[0], style='.', ax=ax0)\n ax0.lines[0].set_color('b')\n ax0.lines[1].set_color('r')\n ax0.ticklabel_format(axis='y', style='sci', scilimits=(-2, 2), useMathText=True)\n ax0.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2), useMathText=True)\n ax0.set_ylabel(ylabel)\n ax0.set_xlabel(xlabel)\n ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x / 1000))\n ax0.xaxis.set_major_formatter(ticks_x)\n ax0.spines['left'].set_linewidth(2)\n ax0.spines['left'].set_color('gray')\n ax0.spines['bottom'].set_linewidth(2)\n ax0.spines['bottom'].set_color('gray')\n ax0.legend(['Good classification', 'Bad classification'], fontsize=14, frameon=True, fancybox=True, markerscale=2.0)\n\n fpr, tpr, _ = roc_curve(y_test, y_pred)\n roc_auc = auc(fpr, tpr)\n f1 = f1_score(y_test, y_pred)\n ax[0, 2].plot(fpr, tpr, color='r', lw=2, label='ROC curve\\narea = {:2f}\\nF1-score: {:2f}'.format(roc_auc, f1))\n ax[0, 2].plot([0, 1], [0, 1], color='b', lw=2, linestyle='--')\n ax[0, 2].set_xlim([-0.01, 1.0])\n ax[0, 2].set_ylim([0.0, 1.05])\n ax[0, 2].set_xlabel('False Positive Rate', fontdict={'size': '14'})\n ax[0, 2].set_ylabel('True Positive Rate', fontdict={'size': '14'})\n ax[0, 2].legend(loc=\"lower right\", prop={'size': '14'}, frameon=True, fancybox=True, markerscale=2.0)\n ax[0, 2].spines['left'].set_linewidth(2)\n ax[0, 2].spines['left'].set_color('gray')\n ax[0, 2].spines['bottom'].set_linewidth(2)\n ax[0, 2].spines['bottom'].set_color('gray')\n\n ax[1, 0] = data.loc[y_train[y_train == 0].dropna().index, variables].plot(x=xvar[0], y=yvar[0], style='.', ax=ax[1, 0])\n ax[1, 0] = data.loc[y_train[y_train == 1].dropna().index, variables].plot(x=xvar[0], y=yvar[0], style='.', ax=ax[1, 0], alpha=0.5)\n ax[1, 0] = data.loc[y_test[y_test == 0].dropna().index, variables].plot(x=xvar[0], y=yvar[0], style='.', ax=ax[1, 0])\n ax[1, 0] = data.loc[y_test[y_test == 1].dropna().index, variables].plot(x=xvar[0], y=yvar[0], style='.', ax=ax[1, 0], alpha=0.5)\n ax[1, 0].set_ylabel(ylabel)\n ax[1, 0].set_xlabel(xlabel)\n ax[1, 0].lines[0].set_color('b')\n ax[1, 0].lines[1].set_color('green')\n ax[1, 0].lines[2].set_color('r')\n ax[1, 0].lines[3].set_color('gray')\n ax[1, 0].legend(['True Non Spike labels (Train)',\n 'True Spike labels (Train) ',\n 'True Non Spike labels (Test)',\n 'True Spike labels (Test) '], fontsize=14, frameon=True, fancybox=True, markerscale=2.0)\n ax[1, 0].xaxis.set_major_formatter(ticks_x)\n ax[1, 0].spines['left'].set_linewidth(2)\n ax[1, 0].spines['left'].set_color('gray')\n ax[1, 0].spines['bottom'].set_linewidth(2)\n ax[1, 0].spines['bottom'].set_color('gray')\n\n ax[1, 1] = x_train.loc[:, xvar[0]].plot.density(ax=ax[1, 1])\n ax[1, 1] = x_test.loc[: , xvar[0]].plot.density(ax=ax[1, 1])\n ax[1, 1].lines[0].set_color('r')\n ax[1, 1].lines[1].set_color('b')\n ax[1, 1].ticklabel_format(axis='y', style='sci', scilimits=(-2, 2), useMathText=True)\n ax[1, 1].ticklabel_format(axis='x', style='sci', scilimits=(-2, 2), useMathText=True)\n ax[1, 1].set_xlabel(xlabel)\n ax[1, 1].xaxis.set_major_formatter(ticks_x)\n ax[1, 1].spines['left'].set_linewidth(2)\n ax[1, 1].spines['left'].set_color('gray')\n ax[1, 1].spines['bottom'].set_linewidth(2)\n ax[1, 1].spines['bottom'].set_color('gray')\n ax[1, 1].legend(['Train set', 'Test set'], fontsize=14, frameon=True, fancybox=True)\n\n CF = pd.DataFrame(confusion_matrix(y_test, y_pred), columns=['No spike', 'Spike'])\n CF.index = ['No spike', 'Spike']\n sns.set(font_scale=1.5)\n ax[1, 2] = sns.heatmap(CF, 
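plot_class_results finishes with the standard classifier diagnostics: roc_curve/auc for the ROC panel, f1_score in its legend, and confusion_matrix rendered as a labelled seaborn heatmap. A self-contained sketch on toy labels:

# Sketch: ROC curve with AUC/F1 in the legend, plus a labelled confusion-matrix heatmap.
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import auc, confusion_matrix, f1_score, roc_curve

y_true = [0, 0, 0, 1, 1, 1, 0, 1]      # toy labels
y_pred = [0, 0, 1, 1, 1, 0, 0, 1]

fpr, tpr, _ = roc_curve(y_true, y_pred)
fig, (ax_roc, ax_cm) = plt.subplots(ncols=2, figsize=(10, 4))
ax_roc.plot(fpr, tpr, color="r",
            label="AUC = {:.2f}\nF1 = {:.2f}".format(auc(fpr, tpr), f1_score(y_true, y_pred)))
ax_roc.plot([0, 1], [0, 1], "b--")     # chance line
ax_roc.legend(loc="lower right")

cf = pd.DataFrame(confusion_matrix(y_true, y_pred),
                  index=["No spike", "Spike"], columns=["No spike", "Spike"])
sns.heatmap(cf, annot=True, fmt="d", square=True, cbar=False, ax=ax_cm)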
cmap='Accent', annot=True, center=0, square=True, ax=ax[1, 2], fmt='d', annot_kws={\"size\": 14}, cbar=None, robust=True)\n ax[1, 2].set_xticklabels(ax[1, 2].get_xmajorticklabels(), fontsize=16)\n ax[1, 2].set_yticklabels(ax[1, 2].get_ymajorticklabels(), fontsize=16)\n\n plt.subplots_adjust(left=None, bottom=0.15, right=None, top=None, wspace=0.2, hspace=0.2)\n\n if save:\n fig.savefig(file_name, bbox_inches='tight', pad_inches=0.1, dpi=300)\n\n########## OLD Functions ################\n\n# def df_to_plot(folder, case, path, df_data, i=1):\n# file = case + '_' + str(i) + '.nc'\n# data_path = path + 'results/' + folder + '/' + case + '/' + file\n# rfile = Dataset(data_path, 'r')\n# nn = np.array(rfile.variables['opt_outputs_ann'][:])\n# da = np.array(rfile.variables['all_data'][:])\n# val = np.array(rfile.variables['validation_index'][:], dtype=np.uint32)\n# train_rmse = rfile.variables['opt_training_rmse'][:][0]\n# test_rmse = rfile.variables['opt_validation_rmse'][:][0]\n# res = pd.DataFrame(data=da, columns=['CH4'])\n# res['MLP'] = nn\n# res['Time'] = df_data.index[:]\n# res.set_index('Time', inplace=True)\n# return res, val, train_rmse, test_rmse\n\n\n# def plot_ts_residuals(df_data, val, a, b, start=None, end=None, resolution=\"D\", size=(30, 23), diff=True):\n# val_ix = [df_data.index[val[0]], df_data.index[val[-1]]]\n# if start is None:\n# start = df_data.index[0]\n# if end is None:\n# end = df_data.index[-1]\n#\n# df_data = df_data.loc[start:end, :]\n#\n# xticks = pd.date_range(df_data.index[0], df_data.index[len(df_data) - 1], freq=resolution, normalize=False)\n#\n# if diff:\n# df_data.loc[:, 'RESIDUAL'] = df_data.loc[:, 'CH4'] - df_data.loc[:, 'MLP']\n#\n# c = [['CH4', 'MLP'], ['RESIDUAL']] # D.columns\n# leg = [['Reference', 'Model'], ['Residual']]\n# n = len(c) # len(D.columns)\n# colors = plt.cm.get_cmap('Dark2')(np.linspace(0, 1, n))\n# text_rmse_train = r'$RMSE_{TRAIN} [ppm]: %.4f $' % (a,)\n# text_rmse_test = r'$RMSE_{TEST} [ppm]: %.4f $' % (b,)\n# props = dict(boxstyle='round', alpha=0.5)\n# with plt.style.context('seaborn-whitegrid'):\n# fig, ax = plt.subplots(nrows=n, sharex=True, figsize=size)\n#\n# for i in range(0, n):\n# ax[i] = df_data.loc[:, c[i]].plot(ax=ax[i], style='.', grid=True, xticks=xticks.to_list(), rot=60, ms=2)\n# ax[i].lines[0].set_color(colors[i])\n# ax[i].legend(leg[i], markerscale=5, loc='upper right', prop={'size': 14}, bbox_to_anchor=(1, 1.0))\n# ax[i].set_xlabel('')\n# # ax[i].axvspan(D.index[0], D.index[-1], facecolor='white')\n# ax[i].axvspan(val_ix[0], val_ix[1], facecolor='blue', alpha=0.15)\n# if i == n - 1:\n# if resolution == \"D\":\n# ax[i].set_xticklabels(xticks.strftime('%b-%d').tolist(), horizontalalignment='center', fontsize=14)\n# # else:\n# # ax[i].set_xticklabels(xticks.strftime('%b-%d %H:%M').tolist(), horizontalalignment='center', fontsize=14);\n# else:\n# ax[i].set_xticklabels('')\n#\n# ax[0].text(0.01, 0.98, text_rmse_train, transform=ax[0].transAxes, fontsize=15, verticalalignment='top', bbox=props)\n# ax[0].text(0.30, 0.98, text_rmse_test, transform=ax[0].transAxes, fontsize=15, verticalalignment='top', bbox=props)\n#\n# return fig\n\n\n# def scatter_ts(folder, case, i, size=(30, 23)):\n# n = len(i)\n# colors = plt.cm.get_cmap('Dark2')(np.linspace(0, 1, n))\n#\n# with plt.style.context('seaborn-whitegrid'):\n# fig, ax = plt.subplots(ncols=n, sharey=True, figsize=size)\n# for j in range(n):\n# D, val, train, test = df_to_plot(folder, case, i[j])\n# ax[j] = D.loc[:, ['CH4d_ppm', 'MLP']].plot(x='CH4d_ppm', y='MLP', style='.', 
ax=ax[j], grid=True)\n# ax[j].lines[0].set_color(colors[j])\n# # ax[i].legend(markerscale=5, loc='upper right', prop={'size': 14}, bbox_to_anchor=(1, 1.0))\n# return fig\n\n\n# def comp_study_plot(path, folder, studies, name_studies=None, size=(20, 10)):\n# y_labs = ['RMSE (ppm)', 'BIAS (ppm)', '$\\sigma / \\sigma_{DATA}$ (%)', r'$\\rho$ (%)']\n#\n# if name_studies is None:\n# name_studies = studies\n#\n# RMSE = pd.DataFrame(columns=studies)\n# BIAS = pd.DataFrame(columns=studies)\n# SIGMA = pd.DataFrame(columns=studies)\n# CORR = pd.DataFrame(columns=studies)\n# for study in studies:\n# for test_set in range(1, 51):\n# D, val_ix, rmse_train, rmse_test = df_to_plot(folder, study, i=test_set, path=path)\n# DD = D.iloc[val_ix, :]\n# bias = (DD.loc[:, 'CH4'] - DD.loc[:, 'MLP']).mean()\n#\n# RMSE.loc[test_set, study] = rmse_test\n# BIAS.loc[test_set, study] = bias\n# SIGMA.loc[test_set, study] = (DD.std()[1] / DD.std()[0]) * 100\n# CORR.loc[test_set, study] = DD.corr().loc['CH4', 'MLP'] * 100\n#\n# RMSE.columns = name_studies\n# BIAS.columns = name_studies\n# SIGMA.columns = name_studies\n# CORR.columns = name_studies\n# # n = len(studies)\n# # colors = plt.cm.get_cmap('Dark2')(np.linspace(0, 1, n))\n#\n# with plt.style.context('seaborn-whitegrid'):\n# fig, ax = plt.subplots(nrows=4, ncols=1, sharex=True, figsize=size, squeeze=False)\n#\n# ax[1, 0].axhline(y=0, color='k', linestyle='--', alpha=0.6)\n# ax[2, 0].axhline(y=100, color='k', linestyle='--', alpha=0.6)\n# ax[3, 0].axhline(y=100, color='k', linestyle='--', alpha=0.6)\n#\n# ax[0, 0] = sns.boxplot(ax=ax[0, 0], width=0.3, data=RMSE)\n# ax[1, 0] = sns.boxplot(ax=ax[1, 0], width=0.3, data=BIAS)\n# ax[2, 0] = sns.boxplot(ax=ax[2, 0], width=0.3, data=SIGMA)\n# ax[3, 0] = sns.boxplot(ax=ax[3, 0], width=0.3, data=CORR)\n#\n# for i in range(0, 4):\n# ax[i, 0].set_ylabel(y_labs[i])\n# ax[i, 0].yaxis.set_major_locator(plt.MaxNLocator(5), )\n# y_labels = ax[i, 0].get_yticks()\n# ax[i, 0].set_yticklabels(y_labels, fontsize=14)\n# if i in [2, 3]:\n# ax[i, 0].yaxis.set_major_formatter(ticker.PercentFormatter())\n# else:\n# ax[i, 0].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\n#\n# return fig\n#\n#\n# def plot_ts_residuals2(df_data, val, a, b, start=None, end=None, resolution=\"D\", size=(30, 23), fontsize=14):\n# sns.set(font_scale=1.3)\n# val_ix = [df_data.index[val[0]], df_data.index[val[-1]]]\n# if start is None:\n# start = df_data.index[0]\n# if end is None:\n# end = df_data.index[-1]\n#\n# df_data = df_data.loc[start:end, :]\n# xticks = pd.date_range(df_data.index[0], df_data.index[len(df_data) - 1], freq=resolution, normalize=False)\n# df_data.loc[:, 'RESIDUAL'] = df_data.loc[:, 'CH4'] - df_data.loc[:, 'MLP']\n# # c = [['CH4', 'MLP'], ['RESIDUAL']] # D.columns\n# leg = [['Reference', 'Model'], ['Residual']]\n# text_rmse_train = r'$MSD_{TRAIN} [ppm]: %.4f $' % (a * a,)\n# text_rmse_test = r'$MSD_{TEST} [ppm]: %.4f $' % (b * b,)\n# props = dict(boxstyle='round', alpha=0.5)\n# colors = plt.cm.get_cmap('Set2')(np.linspace(0, 1, 3))\n# colors2 = plt.cm.get_cmap('Set2')\n# with plt.style.context('seaborn-whitegrid'):\n# fig, ax = plt.subplots(nrows=2, sharex=True, figsize=size)\n# ax[0] = df_data.loc[:, ['CH4', 'MLP']].plot(ax=ax[0], style='.', cmap=colors2, grid=True, xticks=xticks.to_list(), rot=0, ms=3)\n# ax[0].lines[0].set_color('r')\n# ax[0].lines[1].set_color('b')\n# ax[0].legend(leg[0], markerscale=5, prop={'size': fontsize}, loc='center left', bbox_to_anchor=(1, 0.5))\n# ax[0].set_xlabel('')\n# ax[0].set_ylabel('$CH_{4}$ 
[ppm]', fontdict={'size': fontsize})\n# ax[0].axvspan(val_ix[0], val_ix[1], facecolor='blue', alpha=0.15)\n#\n# ax[1] = df_data.loc[:, ['RESIDUAL']].plot(ax=ax[1], style='.', cmap=colors2, grid=True, xticks=xticks.to_list(), rot=0, ms=3)\n# ax[1].lines[0].set_color(colors[2])\n# ax[1].legend(leg[1], markerscale=5, loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': fontsize})\n# ax[1].set_xlabel('Date')\n# ax[1].set_ylabel('Residual [ppm]', fontdict={'size': fontsize})\n# ax[1].axvspan(val_ix[0], val_ix[1], facecolor='blue', alpha=0.15)\n# if \"D\" in resolution:\n# ax[1].set_xticklabels(xticks.strftime('%b-%d').tolist(), horizontalalignment='center', fontsize=fontsize)\n# else:\n# ax[1].set_xticklabels(xticks.strftime('%b-%d %H:%M').tolist(), horizontalalignment='center', fontsize=fontsize)\n#\n# ax[0].text(0.01, 0.98, text_rmse_train, transform=ax[0].transAxes, fontsize=15, verticalalignment='top', bbox=props)\n# ax[0].text(0.30, 0.98, text_rmse_test, transform=ax[0].transAxes, fontsize=15, verticalalignment='top', bbox=props)\n# fig.align_ylabels(ax[:])\n#\n# for n, a in enumerate(ax):\n# a.text(-0.1, 1.1, string.ascii_uppercase[n], transform=a.transAxes, size=16, weight='bold')\n# return fig\n", "sub_path": "graphics/graphics.py", "file_name": "graphics.py", "file_ext": "py", "file_size_in_byte": 46968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 87, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 91, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 99, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 138, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.context", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 146, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.MaxNLocator", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.dates.WeekdayLocator", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.dates.HourLocator", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 272, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 285, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 298, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 304, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 324, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 327, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 327, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.context", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 341, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.dates.WeekdayLocator", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 363, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 365, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 365, "usage_type": "name"}, 
{"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 367, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 367, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 368, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 399, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 413, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 413, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 413, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 415, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 416, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 416, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.context", "line_number": 431, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 431, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 431, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 446, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 446, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 447, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 447, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 455, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 455, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 457, "usage_type": "name"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 479, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.RobustScaler", "line_number": 479, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 480, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 481, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 481, "usage_type": "name"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 484, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.RobustScaler", "line_number": 484, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 485, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 485, "usage_type": "name"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 496, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.AutoLocator", "line_number": 504, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 504, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.AutoLocator", "line_number": 505, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 505, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.MultipleLocator", "line_number": 507, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 507, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.MultipleLocator", "line_number": 508, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 508, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 511, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 512, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 512, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 520, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 520, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.context", "line_number": 538, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 538, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 538, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 539, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 540, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 543, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 545, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 545, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 568, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 568, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 568, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 568, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.AutoLocator", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.AutoLocator", "line_number": 615, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 615, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.AutoLocator", "line_number": 616, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 616, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.AutoLocator", "line_number": 617, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 617, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 636, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 636, "usage_type": "name"}, {"api_name": "matplotlib.dates.WeekdayLocator", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 652, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 654, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 654, "usage_type": "name"}, {"api_name": "matplotlib.dates.AutoDateLocator", "line_number": 656, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 656, "usage_type": "name"}, {"api_name": "matplotlib.dates.ConciseDateFormatter", "line_number": 657, "usage_type": "call"}, {"api_name": 
"matplotlib.dates", "line_number": 657, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 666, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 666, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 693, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 694, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 694, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.context", "line_number": 700, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 700, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 700, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 701, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 701, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 730, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 730, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 731, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 731, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 745, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.context", "line_number": 746, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 746, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 746, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 747, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 747, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 755, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 758, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 769, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 769, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 777, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 778, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 779, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 826, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 826, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 828, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 829, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 833, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 833, "usage_type": "name"}]} +{"seq_id": "336410636", "text": "# -*- coding=utf-8 -*-\nfrom app import app, redirect, render_template, request, get_locale, set_language_switch_link, g, serve_static_page, karp_query, mc_pool, set_cache, check_cache\nimport computeviews\nfrom flask import jsonify, url_for\nfrom flask_babel import gettext\nimport helpers\nimport re\nimport static_info\nfrom authors import authors_dict\nimport icu # pip install PyICU\nimport urllib\n\n\n# redirect to specific language landing-page\n@app.route('/')\ndef index():\n return redirect('/' + get_locale())\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n set_language_switch_link(\"index\")\n return render_template('page.html', 
content=gettext('Contents could not be found!')), 404\n\n\n@app.route('/en', endpoint='index_en')\n@app.route('/sv', endpoint='index_sv')\ndef start():\n page = check_cache(\"start\")\n if page is not None:\n return page\n infotext = helpers.get_infotext(\"start\", request.url_rule.rule)\n set_language_switch_link(\"index\")\n page = render_template('start.html',\n title=\"Svenskt kvinnobiografiskt lexikon\",\n infotext=infotext,\n description=helpers.get_shorttext(infotext))\n return set_cache(page)\n\n\n@app.route(\"/en/about-skbl\", endpoint=\"about-skbl_en\")\n@app.route(\"/sv/om-skbl\", endpoint=\"about-skbl_sv\")\ndef about_skbl():\n page = serve_static_page(\"about-skbl\", gettext(\"About SKBL\"))\n return set_cache(page)\n\n\n@app.route(\"/en/more-women\", endpoint=\"more-women_en\")\n@app.route(\"/sv/fler-kvinnor\", endpoint=\"more-women_sv\")\ndef more_women():\n page = check_cache(\"morewoman\")\n if page is not None:\n return page\n infotext = helpers.get_infotext(\"more-women\", request.url_rule.rule)\n set_language_switch_link(\"more-women\")\n page = render_template('more_women.html',\n women=static_info.more_women,\n infotext=infotext,\n linked_from=request.args.get('linked_from'),\n title=gettext(\"More women\"))\n return set_cache(page, name=\"morewoman\", no_hits=len(static_info.more_women))\n\n\n@app.route(\"/en/biographies\", endpoint=\"biographies_en\")\n@app.route(\"/sv/biografiska-verk\", endpoint=\"biographies_sv\")\ndef biographies():\n page = serve_static_page(\"biographies\", gettext(\"Older biographies\"))\n return set_cache(page)\n\n\n@app.route(\"/en/contact\", endpoint=\"contact_en\")\n@app.route(\"/sv/kontakt\", endpoint=\"contact_sv\")\ndef contact():\n set_language_switch_link(\"contact\")\n\n # Set suggestion checkbox\n if request.args.get('suggest') == 'true':\n mode = \"suggestion\"\n else:\n mode = \"other\"\n page = render_template(\"contact.html\",\n title=gettext(\"Contact\"),\n headline=gettext(\"Contact SKBL\"),\n form_data={},\n mode=mode)\n return set_cache(page)\n\n\n@app.route('/en/contact/', methods=['POST'], endpoint=\"submitted_en\")\n@app.route('/sv/kontakt/', methods=['POST'], endpoint=\"submitted_sv\")\ndef submit_contact_form():\n return set_cache(computeviews.compute_contact_form())\n\n\n@app.route(\"/en/search\", endpoint=\"search_en\")\n@app.route(\"/sv/sok\", endpoint=\"search_sv\")\ndef search():\n set_language_switch_link(\"search\")\n search = request.args.get('q', '').encode('utf-8')\n pagename = 'search' + urllib.quote(search)\n\n page = check_cache(pagename)\n if page is not None:\n return page\n\n advanced_search_text = ''\n if search:\n show = ','.join(['name', 'url', 'undertitel', 'undertitel_eng', 'lifespan'])\n karp_q = {'highlight': True, 'size': app.config['SEARCH_RESULT_SIZE'],\n 'show': show}\n if '*' in search:\n search = re.sub('(?<!\\.)\\*', '.*', search)\n karp_q['q'] = \"extended||and|anything|regexp|%s\" % search\n else:\n karp_q['q'] = \"extended||and|anything|contains|%s\" % search\n\n data = karp_query('minientry', karp_q, mode='skbl')\n with app.open_resource(\"static/pages/advanced-search/%s.html\" % (g.language)) as f:\n advanced_search_text = f.read()\n karp_url = \"https://spraakbanken.gu.se/karp/#?mode=skbl&advanced=false&hpp=25&extended=and%7Cnamn%7Cequals%7C&searchTab=simple&page=1&search=simple%7C%7C\" + search.decode(\"utf-8\")\n else:\n data = {\"hits\": {\"total\": 0, \"hits\": []}}\n karp_url = \"\"\n search = u'\\u200b'.encode('utf8')\n\n t = render_template('list.html', headline=\"\",\n 
subheadline=gettext('Hits for \"%s\"') % search.decode(\"UTF-8\"),\n hits_name=data[\"hits\"],\n hits=data[\"hits\"],\n advanced_search_text=advanced_search_text.decode(\"UTF-8\"),\n search=search.decode(\"UTF-8\"),\n alphabetic=True,\n karp_url=karp_url,\n more=data[\"hits\"][\"total\"] > app.config[\"SEARCH_RESULT_SIZE\"],\n show_lang_switch=False)\n\n return set_cache(t, name=pagename, no_hits=data[\"hits\"][\"total\"])\n\n\n@app.route(\"/en/place\", endpoint=\"place_index_en\")\n@app.route(\"/sv/ort\", endpoint=\"place_index_sv\")\ndef place_index():\n return set_cache(computeviews.compute_place())\n\n\n@app.route(\"/en/place/<place>\", endpoint=\"place_en\")\n@app.route(\"/sv/ort/<place>\", endpoint=\"place_sv\")\ndef place(place=None):\n pagename = urllib.quote('place_'+place.encode('utf8'))\n art = check_cache(pagename)\n if art is not None:\n return art\n lat = request.args.get('lat')\n lon = request.args.get('lon')\n set_language_switch_link(\"place_index\", place)\n hits = karp_query('query', {'q': \"extended||and|plats.searchraw|equals|%s\" % (place.encode('utf-8'))})\n no_hits = hits['hits']['total']\n if no_hits > 0:\n page = render_template('placelist.html', title=place, lat=lat, lon=lon,\n headline=place, hits=hits[\"hits\"])\n else:\n page = render_template('page.html', content=gettext('Contents could not be found!'))\n return set_cache(page, name=pagename, no_hits=no_hits)\n\n\n@app.route(\"/en/organisation\", endpoint=\"organisation_index_en\")\n@app.route(\"/sv/organisation\", endpoint=\"organisation_index_sv\")\ndef organisation_index():\n return set_cache(computeviews.compute_organisation())\n\n\n@app.route(\"/en/organisation/<result>\", endpoint=\"organisation_en\")\n@app.route(\"/sv/organisation/<result>\", endpoint=\"organisation_sv\")\ndef organisation(result=None):\n title = request.args.get('title')\n\n lang = 'sv' if 'sv' in request.url_rule.rule else 'en'\n if lang == \"en\":\n page = computeviews.searchresult(result, 'organisation', 'id',\n 'organisations', title=title,\n lang=lang, show_lang_switch=False)\n else:\n page = computeviews.searchresult(result, 'organisation', 'id',\n 'organisations', title=title,\n lang=lang, show_lang_switch=False)\n return set_cache(page)\n\n\n@app.route(\"/en/activity\", endpoint=\"activity_index_en\")\n@app.route(\"/sv/verksamhet\", endpoint=\"activity_index_sv\")\ndef activity_index():\n return set_cache(computeviews.compute_activity())\n\n\n@app.route(\"/en/activity/<result>\", endpoint=\"activity_en\")\n@app.route(\"/sv/verksamhet/<result>\", endpoint=\"activity_sv\")\ndef activity(result=None):\n page = computeviews.searchresult(result, name='activity',\n searchfield='verksamhetstext',\n imagefolder='activities', title=result)\n return set_cache(page)\n\n\n@app.route(\"/en/keyword\", endpoint=\"keyword_index_en\")\n@app.route(\"/sv/nyckelord\", endpoint=\"keyword_index_sv\")\ndef keyword_index():\n infotext = helpers.get_infotext(\"keyword\", request.url_rule.rule)\n set_language_switch_link(\"keyword_index\")\n lang = 'sv' if 'sv' in request.url_rule.rule else 'en'\n pagename = 'keyword'\n art = check_cache(pagename, lang=lang)\n if art is not None:\n return art\n\n if lang == \"en\":\n reference_list = []\n queryfield = \"nyckelord_eng\"\n else:\n # Fix list with references to be inserted in results\n reference_list = static_info.keywords_reference_list\n [ref.append(\"reference\") for ref in reference_list]\n queryfield = \"nyckelord\"\n\n art = computeviews.bucketcall(queryfield=queryfield, name='keyword',\n 
title='Keywords', infotext=infotext,\n                                  alphabetical=True,\n                                  insert_entries=reference_list,\n                                  description=helpers.get_shorttext(infotext))\n    return set_cache(art, name=pagename, lang=lang, no_hits=app.config['CACHE_HIT_LIMIT'])\n\n\n@app.route(\"/en/keyword/<result>\", endpoint=\"keyword_en\")\n@app.route(\"/sv/nyckelord/<result>\", endpoint=\"keyword_sv\")\ndef keyword(result=None):\n    lang = 'sv' if 'sv' in request.url_rule.rule else 'en'\n    if lang == \"en\":\n        page = computeviews.searchresult(result, 'keyword', 'nyckelord_eng',\n                                         'keywords', lang=lang,\n                                         show_lang_switch=False)\n    else:\n        page = computeviews.searchresult(result, 'keyword', 'nyckelord',\n                                         'keywords', lang=lang,\n                                         show_lang_switch=False)\n    # the page is memcached by searchresult\n    return set_cache(page)\n\n\n# @app.route(\"/en/author_presentations\", endpoint=\"author_presentations_en\")\n# @app.route(\"/sv/forfattar_presentationer\", endpoint=\"author_presentations_sv\")\n# def author_presentations():\n#     # JUST FOR TESTING\n#     set_language_switch_link(\"author_presentations\")\n#\n#     authorinfo = []\n#     keylist = authors_dict.keys()\n#     keylist.sort(key=lambda k: k.split()[-1])\n#     for key in keylist:\n#         if authors_dict[key].get(\"publications\"):\n#             authors_dict[key][\"publications\"] = [helpers.markdown_html(i) for i in authors_dict[key].get(\"publications\")]\n#         authorinfo.append((key, authors_dict[key]))\n#     page = render_template('author_presentations.html', authorinfo=authorinfo, title=\"Authors\")\n#     return set_cache(page)\n\n\n@app.route(\"/en/articleauthor\", endpoint=\"articleauthor_index_en\")\n@app.route(\"/sv/artikelforfattare\", endpoint=\"articleauthor_index_sv\")\ndef authors():\n    infotext = helpers.get_infotext(\"articleauthor\", request.url_rule.rule)\n    set_language_switch_link(\"articleauthor_index\")\n    return set_cache(computeviews.compute_artikelforfattare(infotext=infotext, description=helpers.get_shorttext(infotext)))\n\n\n@app.route(\"/en/articleauthor/<result>\", endpoint=\"articleauthor_en\")\n@app.route(\"/sv/artikelforfattare/<result>\", endpoint=\"articleauthor_sv\")\ndef author(result=None):\n    set_language_switch_link(\"articleauthor_index\")\n    rule = request.url_rule\n    lang = 'sv' if 'sv' in rule.rule else 'en'\n    # Try to get authorinfo in correct language (with Swedish as fallback)\n    firstname = result.split(\", \")[-1].strip()\n    lastname = result.split(\", \")[0].strip()\n    authorinfo = authors_dict.get(firstname + \" \" + lastname)\n    if authorinfo:\n        authorinfo = [authorinfo.get(lang, authorinfo.get(\"sv\")),\n                      [helpers.markdown_html(i) for i in authorinfo.get(\"publications\", [])]]\n    query = \"extended||and|artikel_forfattare_fornamn.lowerbucket|equals|%s||and|artikel_forfattare_efternamn.lowerbucket|equals|%s\" % (\n            firstname.encode(\"UTF-8\"), lastname.encode(\"UTF-8\"))\n    page = computeviews.searchresult(result,\n                                     name='articleauthor',\n                                     query=query,\n                                     imagefolder='authors',\n                                     authorinfo=authorinfo,\n                                     show_lang_switch=False)\n    return set_cache(page)\n\n\n@app.route(\"/en/article\", endpoint=\"article_index_en\")\n@app.route(\"/sv/artikel\", endpoint=\"article_index_sv\")\ndef article_index(search=None):\n    # search is only used by links in article text\n\n    set_language_switch_link(\"article_index\")\n    search = search or request.args.get('search')\n    if search is not None:\n        search = search.encode(\"UTF-8\")\n        data, id = find_link(search)\n        if id:\n            # only one hit is found, redirect to that page\n            page = redirect(url_for('article_' + g.language, id=id))\n            return set_cache(page)\n        elif 
data[\"hits\"][\"total\"] > 1:\n # more than one hit is found, redirect to a listing\n page = redirect(url_for('search_' + g.language, q=search))\n return set_cache(page)\n else:\n # no hits are found redirect to a 'not found' page\n return render_template('page.html', content=gettext('Contents could not be found!')), 404\n\n art = computeviews.compute_article()\n return set_cache(art)\n\n\n@app.route(\"/en/article/<id>\", endpoint=\"article_en\")\n@app.route(\"/sv/artikel/<id>\", endpoint=\"article_sv\")\ndef article(id=None):\n rule = request.url_rule\n if 'sv' in rule.rule:\n lang = \"sv\"\n else:\n lang = \"en\"\n pagename = 'article_' + id\n page = check_cache(pagename, lang=lang)\n if page is not None:\n return page\n data = karp_query('query', {'q': \"extended||and|url|equals|%s\" % (id)})\n if data['hits']['total'] == 0:\n data = karp_query('query', {'q': \"extended||and|id.search|equals|%s\" % (id)})\n set_language_switch_link(\"article_index\", id)\n page = show_article(data, lang)\n return set_cache(page, name=pagename, lang=lang, no_hits=1)\n\n\n@app.route(\"/en/article/EmptyArticle\", endpoint=\"article_empty_en\")\n@app.route(\"/sv/artikel/TomArtikel\", endpoint=\"article_empty_sv\")\ndef empty_article():\n set_language_switch_link(\"article_empty\")\n rule = request.url_rule\n if 'sv' in rule.rule:\n content = u\"\"\"Den här kvinnan saknas än så länge.\"\"\"\n else:\n content = u\"\"\"This entry does not exist yet.\"\"\"\n page = render_template('page.html', content=content)\n return set_cache(page)\n\n\ndef find_link(searchstring):\n # Finds an article based on ISNI or name\n if re.search('^[0-9 ]*$', searchstring):\n searchstring = searchstring.replace(\" \", \"\")\n data = karp_query('query', {'q': \"extended||and|swoid.search|equals|%s\" % (searchstring)})\n else:\n parts = searchstring.split(\" \")\n if \",\" in searchstring or len(parts) == 1: # When there is only a first name (a queen or so)\n # case 1: \"Margareta\"\n # case 2: \"Margareta, drottning\"\n firstname = parts[0] if len(parts) == 1 else searchstring\n data = karp_query('query', {'q': \"extended||and|fornamn.search|contains|%s\" % (firstname)})\n else:\n fornamn = \" \".join(parts[0:-1])\n prefix = \"\"\n last_fornamn = fornamn.split(\" \")[-1]\n if last_fornamn == \"von\" or last_fornamn == \"af\":\n fornamn = \" \".join(fornamn.split(\" \")[0:-1])\n prefix = last_fornamn + \" \"\n efternamn = prefix + parts[-1]\n data = karp_query('query', {'q': \"extended||and|fornamn.search|contains|%s||and|efternamn.search|contains|%s\" % (fornamn, efternamn)})\n # The expected case: only one hit is found\n if data['hits']['total'] == 1:\n url = data['hits']['hits'][0]['_source'].get('url')\n es_id = data['hits']['hits'][0]['_id']\n return data, (url or es_id)\n # Otherwise just return the data\n else:\n return data, ''\n\n\ndef show_article(data, lang=\"sv\"):\n if data['hits']['total'] == 1:\n source = data['hits']['hits'][0]['_source']\n source['url'] = source.get('url') or data['query']['hits']['hits'][0]['_id']\n source['es_id'] = data['hits']['hits'][0]['_id']\n\n # Print html for the names with the calling name and last name in bold\n formatted_names = helpers.format_names(source, \"b\")\n source['showname'] = \"%s <b>%s</b>\" % (formatted_names, source['name'].get('lastname', ''))\n title = \"%s %s\" % (helpers.format_names(source, \"\"), source['name'].get('lastname', ''))\n if source.get('text'):\n source['text'] = helpers.markdown_html(helpers.unescape(helpers.mk_links(source['text'])))\n if 
source.get('text_eng'):\n source['text_eng'] = helpers.markdown_html(helpers.unescape(helpers.mk_links(source['text_eng'])))\n\n # Extract linked names from source\n source['linked_names'] = find_linked_names(source.get(\"othernames\", {}), source.get(\"showname\"))\n source['othernames'] = helpers.group_by_type(source.get('othernames', {}), 'name')\n\n helpers.collapse_kids(source)\n if \"source\" in source:\n source['source'] = helpers.aggregate_by_type(source['source'], use_markdown=True)\n if \"furtherreference\" in source:\n source['furtherreference'] = helpers.aggregate_by_type(source['furtherreference'], use_markdown=True)\n if type(source[\"article_author\"]) != list:\n source[\"article_author\"] = [source[\"article_author\"]]\n\n # Set description for meta data\n if lang == \"sv\":\n description = helpers.get_shorttext(source.get('text', ''))\n else:\n description = helpers.get_shorttext(source.get('text_eng', source.get('text', '')))\n\n if source.get(\"portrait\"):\n image = source[\"portrait\"][0].get(\"url\", \"\")\n else:\n image = \"\"\n\n # Sort keywords alphabetically\n kw = source.get(\"keyword\", [])\n collator = icu.Collator.createInstance(icu.Locale('sv_SE.UTF-8'))\n kw.sort(key=lambda x: collator.getSortKey(x))\n\n under_development = True if source.get(\"skbl_status\") == \"Under utveckling\" else False\n\n return render_template('article.html',\n article=source,\n article_id=source['es_id'],\n article_url=source['url'],\n title=title,\n description=description,\n image=image,\n under_development=under_development)\n else:\n return render_template('page.html', content=gettext('Contents could not be found!')), 404\n\n\ndef find_linked_names(othernames, showname):\n \"\"\"Find and format linked names.\"\"\"\n linked_names = []\n for item in othernames:\n if item.get(\"mk_link\") is True:\n name = fix_name_order(item.get(\"name\"))\n # Do not add linked name if all of its parts occur in showname\n if any(i for i in name.split() if i not in showname):\n linked_names.append(name)\n return \", \".join(linked_names)\n\n\ndef fix_name_order(name):\n \"\"\"Lastname, Firstname --> Firstname Lastname\"\"\"\n nameparts = name.split(\", \")\n if len(nameparts) == 1:\n return nameparts[0]\n elif len(nameparts) == 2:\n return nameparts[1] + \" \" + nameparts[0]\n elif len(nameparts) == 3:\n return nameparts[2] + \" \" + nameparts[1] + \" \" + nameparts[0]\n\n\n@app.route(\"/en/award\", endpoint=\"award_index_en\")\n@app.route(\"/sv/pris\", endpoint=\"award_index_sv\")\ndef award_index():\n # There are no links to this page, but might be wanted later on\n # Exists only to support award/<result> below\n set_language_switch_link(\"award_index\")\n pagename = 'award'\n art = check_cache(pagename)\n if art is not None:\n return art\n art = computeviews.bucketcall(queryfield='prisbeskrivning', name='award',\n title='Award', infotext='')\n return set_cache(art, name=pagename, no_hits=app.config['CACHE_HIT_LIMIT'])\n\n\n@app.route(\"/en/award/<result>\", endpoint=\"award_en\")\n@app.route(\"/sv/pris/<result>\", endpoint=\"award_sv\")\ndef award(result=None):\n page = computeviews.searchresult(result, name='award',\n searchfield='prisbeskrivning',\n imagefolder='award', searchtype='equals')\n return set_cache(page)\n\n\n@app.route(\"/en/education_institution\", endpoint=\"institution_index_en\")\n@app.route(\"/sv/utbildningsinstitution\", endpoint=\"institution_index_sv\")\ndef institution_index():\n # There are no links to this page, but might be wanted later on\n # Exists only to support 
institution/<result> below\n    set_language_switch_link(\"institution_index\")\n    page = computeviews.bucketcall(queryfield='prisbeskrivning', name='award',\n                                   title='Institution', infotext='')\n    return set_cache(page)\n\n\n@app.route(\"/en/education_institution/<result>\", endpoint=\"institution_en\")\n@app.route(\"/sv/utbildningsinstitution/<result>\", endpoint=\"institution_sv\")\ndef institution(result=None):\n    page = computeviews.searchresult(result,\n                                     name='institution',\n                                     searchfield='utbildningsinstitution',\n                                     title=result)\n    return set_cache(page)\n\n\n@app.route(\"/en/article/<id>.json\", endpoint=\"article_json_en\")\n@app.route(\"/sv/artikel/<id>.json\", endpoint=\"article_json_sv\")\ndef article_json(id=None):\n    data = karp_query('query', {'q': \"extended||and|url|equals|%s\" % (id)})\n    if data['hits']['total'] == 1:\n        page = jsonify(data['hits']['hits'][0]['_source'])\n        return set_cache(page)\n    data = karp_query('query', {'q': \"extended||and|id.search|equals|%s\" % (id)})\n    if data['hits']['total'] == 1:\n        page = jsonify(data['hits']['hits'][0]['_source'])\n        return set_cache(page)\n\n\n# ### Cache handling ###\n@app.route('/emptycache')\ndef emptycache():\n    # Users with write permissions to skbl may empty the cache\n    emptied = False\n    try:\n        emptied = computeviews.compute_emptycache(['article', 'activity',\n                                                   'organisation', 'place',\n                                                   'author'])\n    except Exception:\n        emptied = False\n        # return jsonify({\"error\": \"%s\" % e})\n    return jsonify({\"cached_emptied\": emptied})\n\n\n@app.route('/cachestats')\ndef cachestats():\n    # Show stats of the cache\n    with mc_pool.reserve() as client:\n        return jsonify({\"cached_stats\": client.get_stats()})\n\n\n@app.route(\"/en/fillcache\", endpoint=\"fillcache_en\")\n@app.route(\"/sv/fillcache\", endpoint=\"fillcache_sv\")\ndef fillcache():\n    # Refill the cache (~ touch all pages)\n    # This request will take some seconds, users may want to make an\n    # asynchronous call\n    # Compute new pages\n    urls = {'activity': ('en/activity', 'sv/verksamhet'),\n            'article': (\"en/article\", \"sv/artikel\"),\n            'organisation': (\"en/organisation\", \"sv/organisation\"),\n            'place': (\"en/place\", \"sv/ort\"),\n            'forfattare': (\"en/articleauthor/<result>\",\"sv/artikelforfattare/<result>\")\n            }\n    lang = 'sv' if 'sv' in request.url_rule.rule else 'en'\n    lix = 0 if lang == 'en' else 1\n    computeviews.compute_article(cache=False, url=request.url_root+urls['article'][lix])\n    computeviews.compute_activity(cache=False, url=request.url_root+urls['activity'][lix])\n    computeviews.compute_organisation(cache=False, url=request.url_root+urls['organisation'][lix])\n    computeviews.compute_place(cache=False, url=request.url_root+urls['place'][lix])\n    computeviews.compute_artikelforfattare(cache=False, url=request.url_root+urls['forfattare'][lix])\n    # Copy the pages to the backup fields\n    computeviews.copytobackup(['article', 'activity', 'organisation', 'place', 'author'], lang)\n    return jsonify({\"cache_filled\": True, \"cached_language\": lang})\n\n\n@app.route('/mcpoolid')\ndef mcpoolid():\n    return jsonify({\"id\": id(mc_pool)})\n", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 23919, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "app.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "app.get_locale", "line_number": 17, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 15, "usage_type": "call"}, {"api_name": "app.app", 
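The views.py record above leans on one pattern throughout: every page is registered under two URL rules, one per language, and the handler recovers the active language from the rule that matched. A minimal runnable sketch of that pattern, assuming only Flask itself (the route strings mirror the contact route in the record; the page text is illustrative):

from flask import Flask, request

app = Flask(__name__)

# One view function serves both language variants of a page.
# Distinct endpoint names keep url_for() lookups unambiguous.
@app.route("/en/contact", endpoint="contact_en")
@app.route("/sv/kontakt", endpoint="contact_sv")
def contact():
    # Infer the language from the URL rule that matched, as views.py does.
    lang = 'sv' if 'sv' in request.url_rule.rule else 'en'
    return "Kontakta oss" if lang == 'sv' else "Contact us"

The per-language endpoint names are what make redirects such as url_for('article_' + g.language, id=id) in the record possible: the language suffix selects the right rule at runtime.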
"line_number": 15, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 22, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 23, "usage_type": "call"}, {"api_name": "app.app.errorhandler", "line_number": 20, "usage_type": "call"}, {"api_name": "app.app", "line_number": 20, "usage_type": "name"}, {"api_name": "app.check_cache", "line_number": 29, "usage_type": "call"}, {"api_name": "helpers.get_infotext", "line_number": 32, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 32, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 32, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 33, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "helpers.get_shorttext", "line_number": 37, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 38, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 26, "usage_type": "call"}, {"api_name": "app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 27, "usage_type": "call"}, {"api_name": "app.app", "line_number": 27, "usage_type": "name"}, {"api_name": "app.serve_static_page", "line_number": 44, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 44, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 45, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 41, "usage_type": "call"}, {"api_name": "app.app", "line_number": 41, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 42, "usage_type": "call"}, {"api_name": "app.app", "line_number": 42, "usage_type": "name"}, {"api_name": "app.check_cache", "line_number": 51, "usage_type": "call"}, {"api_name": "helpers.get_infotext", "line_number": 54, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 54, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 55, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "static_info.more_women", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.request.args.get", "line_number": 59, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 59, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask_babel.gettext", "line_number": 60, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 61, "usage_type": "call"}, {"api_name": "static_info.more_women", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "app.app", "line_number": 48, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 49, "usage_type": "call"}, {"api_name": "app.app", "line_number": 49, "usage_type": "name"}, {"api_name": "app.serve_static_page", "line_number": 67, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 67, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 68, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 64, "usage_type": "call"}, {"api_name": "app.app", "line_number": 64, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 65, 
"usage_type": "call"}, {"api_name": "app.app", "line_number": 65, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 74, "usage_type": "call"}, {"api_name": "app.request.args.get", "line_number": 77, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 77, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 77, "usage_type": "name"}, {"api_name": "app.render_template", "line_number": 81, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 82, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 83, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 86, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 71, "usage_type": "call"}, {"api_name": "app.app", "line_number": 71, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 72, "usage_type": "call"}, {"api_name": "app.app", "line_number": 72, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 92, "usage_type": "call"}, {"api_name": "computeviews.compute_contact_form", "line_number": 92, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 89, "usage_type": "call"}, {"api_name": "app.app", "line_number": 89, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 90, "usage_type": "call"}, {"api_name": "app.app", "line_number": 90, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 98, "usage_type": "call"}, {"api_name": "app.request.args.get", "line_number": 99, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 99, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 99, "usage_type": "name"}, {"api_name": "urllib.quote", "line_number": 100, "usage_type": "call"}, {"api_name": "app.check_cache", "line_number": 102, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 109, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 109, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 112, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 117, "usage_type": "call"}, {"api_name": "app.app.open_resource", "line_number": 118, "usage_type": "call"}, {"api_name": "app.app", "line_number": 118, "usage_type": "name"}, {"api_name": "app.g.language", "line_number": 118, "usage_type": "attribute"}, {"api_name": "app.g", "line_number": 118, "usage_type": "name"}, {"api_name": "app.render_template", "line_number": 126, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 127, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 134, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 134, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 137, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 95, "usage_type": "call"}, {"api_name": "app.app", "line_number": 95, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 96, "usage_type": "call"}, {"api_name": "app.app", "line_number": 96, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 143, "usage_type": "call"}, {"api_name": "computeviews.compute_place", "line_number": 143, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 140, "usage_type": "call"}, {"api_name": "app.app", "line_number": 140, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 141, "usage_type": "call"}, {"api_name": "app.app", "line_number": 141, "usage_type": 
"name"}, {"api_name": "urllib.quote", "line_number": 149, "usage_type": "call"}, {"api_name": "app.check_cache", "line_number": 150, "usage_type": "call"}, {"api_name": "app.request.args.get", "line_number": 153, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 153, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 153, "usage_type": "name"}, {"api_name": "app.request.args.get", "line_number": 154, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 154, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 154, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 155, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 156, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 159, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 162, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 162, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 163, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 146, "usage_type": "call"}, {"api_name": "app.app", "line_number": 146, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 147, "usage_type": "call"}, {"api_name": "app.app", "line_number": 147, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 169, "usage_type": "call"}, {"api_name": "computeviews.compute_organisation", "line_number": 169, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 166, "usage_type": "call"}, {"api_name": "app.app", "line_number": 166, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 167, "usage_type": "call"}, {"api_name": "app.app", "line_number": 167, "usage_type": "name"}, {"api_name": "app.request.args.get", "line_number": 175, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 175, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 175, "usage_type": "name"}, {"api_name": "app.request.url_rule", "line_number": 177, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 177, "usage_type": "name"}, {"api_name": "computeviews.searchresult", "line_number": 179, "usage_type": "call"}, {"api_name": "computeviews.searchresult", "line_number": 183, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 186, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 172, "usage_type": "call"}, {"api_name": "app.app", "line_number": 172, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 173, "usage_type": "call"}, {"api_name": "app.app", "line_number": 173, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 192, "usage_type": "call"}, {"api_name": "computeviews.compute_activity", "line_number": 192, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 189, "usage_type": "call"}, {"api_name": "app.app", "line_number": 189, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 190, "usage_type": "call"}, {"api_name": "app.app", "line_number": 190, "usage_type": "name"}, {"api_name": "computeviews.searchresult", "line_number": 198, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 201, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 195, "usage_type": "call"}, {"api_name": "app.app", "line_number": 195, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 196, "usage_type": "call"}, {"api_name": 
"app.app", "line_number": 196, "usage_type": "name"}, {"api_name": "helpers.get_infotext", "line_number": 207, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 207, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 207, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 208, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 209, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 209, "usage_type": "name"}, {"api_name": "app.check_cache", "line_number": 211, "usage_type": "call"}, {"api_name": "static_info.keywords_reference_list", "line_number": 220, "usage_type": "attribute"}, {"api_name": "computeviews.bucketcall", "line_number": 224, "usage_type": "call"}, {"api_name": "helpers.get_shorttext", "line_number": 228, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 229, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 229, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 229, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 204, "usage_type": "call"}, {"api_name": "app.app", "line_number": 204, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 205, "usage_type": "call"}, {"api_name": "app.app", "line_number": 205, "usage_type": "name"}, {"api_name": "app.request.url_rule", "line_number": 235, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 235, "usage_type": "name"}, {"api_name": "computeviews.searchresult", "line_number": 237, "usage_type": "call"}, {"api_name": "computeviews.searchresult", "line_number": 241, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 245, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 232, "usage_type": "call"}, {"api_name": "app.app", "line_number": 232, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 233, "usage_type": "call"}, {"api_name": "app.app", "line_number": 233, "usage_type": "name"}, {"api_name": "helpers.get_infotext", "line_number": 268, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 268, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 268, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 269, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 270, "usage_type": "call"}, {"api_name": "computeviews.compute_artikelforfattare", "line_number": 270, "usage_type": "call"}, {"api_name": "helpers.get_shorttext", "line_number": 270, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 265, "usage_type": "call"}, {"api_name": "app.app", "line_number": 265, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 266, "usage_type": "call"}, {"api_name": "app.app", "line_number": 266, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 276, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 277, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 277, "usage_type": "name"}, {"api_name": "authors.authors_dict.get", "line_number": 282, "usage_type": "call"}, {"api_name": "authors.authors_dict", "line_number": 282, "usage_type": "name"}, {"api_name": "helpers.markdown_html", "line_number": 285, "usage_type": "call"}, {"api_name": "computeviews.searchresult", "line_number": 288, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 294, "usage_type": "call"}, 
{"api_name": "app.app.route", "line_number": 273, "usage_type": "call"}, {"api_name": "app.app", "line_number": 273, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 274, "usage_type": "call"}, {"api_name": "app.app", "line_number": 274, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 302, "usage_type": "call"}, {"api_name": "app.request.args.get", "line_number": 303, "usage_type": "call"}, {"api_name": "app.request.args", "line_number": 303, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 303, "usage_type": "name"}, {"api_name": "app.redirect", "line_number": 309, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 309, "usage_type": "call"}, {"api_name": "app.g.language", "line_number": 309, "usage_type": "attribute"}, {"api_name": "app.g", "line_number": 309, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 310, "usage_type": "call"}, {"api_name": "app.redirect", "line_number": 313, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 313, "usage_type": "call"}, {"api_name": "app.g.language", "line_number": 313, "usage_type": "attribute"}, {"api_name": "app.g", "line_number": 313, "usage_type": "name"}, {"api_name": "app.set_cache", "line_number": 314, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 317, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 317, "usage_type": "call"}, {"api_name": "computeviews.compute_article", "line_number": 319, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 320, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 297, "usage_type": "call"}, {"api_name": "app.app", "line_number": 297, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 298, "usage_type": "call"}, {"api_name": "app.app", "line_number": 298, "usage_type": "name"}, {"api_name": "app.request.url_rule", "line_number": 326, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 326, "usage_type": "name"}, {"api_name": "app.check_cache", "line_number": 332, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 335, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 337, "usage_type": "call"}, {"api_name": "app.set_language_switch_link", "line_number": 338, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 340, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 323, "usage_type": "call"}, {"api_name": "app.app", "line_number": 323, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 324, "usage_type": "call"}, {"api_name": "app.app", "line_number": 324, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 346, "usage_type": "call"}, {"api_name": "app.request.url_rule", "line_number": 347, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 347, "usage_type": "name"}, {"api_name": "app.render_template", "line_number": 352, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 353, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 343, "usage_type": "call"}, {"api_name": "app.app", "line_number": 343, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 344, "usage_type": "call"}, {"api_name": "app.app", "line_number": 344, "usage_type": "name"}, {"api_name": "re.search", "line_number": 358, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 360, "usage_type": 
"call"}, {"api_name": "app.karp_query", "line_number": 367, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 376, "usage_type": "call"}, {"api_name": "helpers.format_names", "line_number": 394, "usage_type": "call"}, {"api_name": "helpers.format_names", "line_number": 396, "usage_type": "call"}, {"api_name": "helpers.markdown_html", "line_number": 398, "usage_type": "call"}, {"api_name": "helpers.unescape", "line_number": 398, "usage_type": "call"}, {"api_name": "helpers.mk_links", "line_number": 398, "usage_type": "call"}, {"api_name": "helpers.markdown_html", "line_number": 400, "usage_type": "call"}, {"api_name": "helpers.unescape", "line_number": 400, "usage_type": "call"}, {"api_name": "helpers.mk_links", "line_number": 400, "usage_type": "call"}, {"api_name": "helpers.group_by_type", "line_number": 404, "usage_type": "call"}, {"api_name": "helpers.collapse_kids", "line_number": 406, "usage_type": "call"}, {"api_name": "helpers.aggregate_by_type", "line_number": 408, "usage_type": "call"}, {"api_name": "helpers.aggregate_by_type", "line_number": 410, "usage_type": "call"}, {"api_name": "helpers.get_shorttext", "line_number": 416, "usage_type": "call"}, {"api_name": "helpers.get_shorttext", "line_number": 418, "usage_type": "call"}, {"api_name": "icu.Collator.createInstance", "line_number": 427, "usage_type": "call"}, {"api_name": "icu.Collator", "line_number": 427, "usage_type": "attribute"}, {"api_name": "icu.Locale", "line_number": 427, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 432, "usage_type": "call"}, {"api_name": "app.render_template", "line_number": 441, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 441, "usage_type": "call"}, {"api_name": "app.set_language_switch_link", "line_number": 472, "usage_type": "call"}, {"api_name": "app.check_cache", "line_number": 474, "usage_type": "call"}, {"api_name": "computeviews.bucketcall", "line_number": 477, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 479, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 479, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 479, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 467, "usage_type": "call"}, {"api_name": "app.app", "line_number": 467, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 468, "usage_type": "call"}, {"api_name": "app.app", "line_number": 468, "usage_type": "name"}, {"api_name": "computeviews.searchresult", "line_number": 485, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 488, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 482, "usage_type": "call"}, {"api_name": "app.app", "line_number": 482, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 483, "usage_type": "call"}, {"api_name": "app.app", "line_number": 483, "usage_type": "name"}, {"api_name": "app.set_language_switch_link", "line_number": 496, "usage_type": "call"}, {"api_name": "computeviews.bucketcall", "line_number": 497, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 499, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 491, "usage_type": "call"}, {"api_name": "app.app", "line_number": 491, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 492, "usage_type": "call"}, {"api_name": "app.app", "line_number": 492, "usage_type": "name"}, {"api_name": "computeviews.searchresult", "line_number": 505, "usage_type": "call"}, {"api_name": 
"app.set_cache", "line_number": 509, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 502, "usage_type": "call"}, {"api_name": "app.app", "line_number": 502, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 503, "usage_type": "call"}, {"api_name": "app.app", "line_number": 503, "usage_type": "name"}, {"api_name": "app.karp_query", "line_number": 515, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 517, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 518, "usage_type": "call"}, {"api_name": "app.karp_query", "line_number": 519, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 521, "usage_type": "call"}, {"api_name": "app.set_cache", "line_number": 522, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 512, "usage_type": "call"}, {"api_name": "app.app", "line_number": 512, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 513, "usage_type": "call"}, {"api_name": "app.app", "line_number": 513, "usage_type": "name"}, {"api_name": "computeviews.compute_emptycache", "line_number": 531, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 537, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 526, "usage_type": "call"}, {"api_name": "app.app", "line_number": 526, "usage_type": "name"}, {"api_name": "app.mc_pool.reserve", "line_number": 543, "usage_type": "call"}, {"api_name": "app.mc_pool", "line_number": 543, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 544, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 540, "usage_type": "call"}, {"api_name": "app.app", "line_number": 540, "usage_type": "name"}, {"api_name": "app.request.url_rule", "line_number": 560, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 560, "usage_type": "name"}, {"api_name": "computeviews.compute_article", "line_number": 562, "usage_type": "call"}, {"api_name": "app.request.url_root", "line_number": 562, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 562, "usage_type": "name"}, {"api_name": "computeviews.compute_activity", "line_number": 563, "usage_type": "call"}, {"api_name": "app.request.url_root", "line_number": 563, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 563, "usage_type": "name"}, {"api_name": "computeviews.compute_organisation", "line_number": 564, "usage_type": "call"}, {"api_name": "app.request.url_root", "line_number": 564, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 564, "usage_type": "name"}, {"api_name": "computeviews.compute_place", "line_number": 565, "usage_type": "call"}, {"api_name": "app.request.url_root", "line_number": 565, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 565, "usage_type": "name"}, {"api_name": "computeviews.compute_artikelforfattare", "line_number": 566, "usage_type": "call"}, {"api_name": "app.request.url_root", "line_number": 566, "usage_type": "attribute"}, {"api_name": "app.request", "line_number": 566, "usage_type": "name"}, {"api_name": "computeviews.copytobackup", "line_number": 568, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 569, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 547, "usage_type": "call"}, {"api_name": "app.app", "line_number": 547, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 548, "usage_type": "call"}, {"api_name": "app.app", "line_number": 548, "usage_type": "name"}, 
{"api_name": "flask.jsonify", "line_number": 574, "usage_type": "call"}, {"api_name": "app.mc_pool", "line_number": 574, "usage_type": "argument"}, {"api_name": "app.app.route", "line_number": 572, "usage_type": "call"}, {"api_name": "app.app", "line_number": 572, "usage_type": "name"}]} +{"seq_id": "196346092", "text": "from logging import currentframe\nfrom os import stat\nfrom re import M\nimport time, json\nfrom datetime import datetime as dts\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import CheckConstraint\nfrom flask_login import LoginManager,login_user, logout_user, login_required, current_user,UserMixin\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom py2neo import Graph\nimport pandas as pd\nimport numpy as np\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ncritic_weight=5.0\noffset = 50\ncolorDic = {\n 'Film-Noir': '#33331a',\n 'Sport': '#00b300',\n 'Comedy': '#33adff',\n 'Fantasy': '#ff80ff',\n 'Family': '#9999ff',\n 'Reality-TV': '#661a00',\n 'Documentary': '#3d3d5c',\n 'Action': '#ff0000',\n 'Musical': '#cccc00',\n 'News': '#334d4d',\n 'Game-Show': '#33cccc',\n 'Thriller': '#2f2f1e',\n 'Crime': '#8f00b3',\n 'History': '#ffb84d',\n 'Biography': '#bf80ff',\n 'Sci-Fi': '#3366ff',\n 'War': '#b3003b',\n 'Short': '#00804d',\n 'Horror': '#66004d',\n 'Music': '#c6ff1a',\n 'Romance': '#ff1a75',\n 'Western': '#ffb366',\n 'Drama': '#003333',\n 'Adventure': '#ff4d4d',\n 'Mystery': '#1a6600',\n 'Animation': '#000099'\n}\n\n\ndb = SQLAlchemy(app)\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy\n username = db.Column(db.String(100), unique=True, nullable=False)\n password = db.Column(db.String(100), nullable = False)\n name = db.Column(db.String(1000), nullable=False)\n gender = db.Column(db.String(20), CheckConstraint(\"gender in ('Male', 'Female', 'Other')\"))\n dob = db.Column(db.DateTime)\n isCritic = db.Column(db.Boolean, nullable=False)\n\n def __init__(self, username, password, name, gender, dob, isCritic=False):\n self.username = username\n self.password = password\n self.name = name\n self.gender = gender\n self.dob = dob\n self.isCritic = isCritic\n\ndb.init_app(app)\nlogin_manager = LoginManager()\nlogin_manager.login_view = '/login'\nlogin_manager.init_app(app)\n\ngraph = Graph(name=\"recommendersystem\",user=\"neo4j\",password=\"werock1234\")\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n@app.route('/checkUserLoggedIn', methods=['GET'])\ndef checkUserLoggedIn():\n if current_user.is_authenticated:\n return {'isUserLoggedIn': True, 'isAdmin': current_user.id==1, 'isCritic': current_user.isCritic}\n else:\n return {'isUserLoggedIn': False, 'isAdmin': False, 'isCritic': False}\n\n@app.route('/loginUser', methods=['POST'])\ndef loginUser():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n password = data['password'].strip()\n user = User.query.filter_by(username = username).first()\n if not user:\n return {'loggedIn': False, 'loginerror': \"Username does not exist\",'isAdmin': False, 'isCritic': False}\n elif not check_password_hash(user.password, password):\n return {'loggedIn': False, 'loginerror': \"Invalid Password\",'isAdmin': False, 'isCritic': False}\n 
else:\n login_user(user, remember=False)\n return {'loggedIn': True, 'loginerror': \"NA\",'isAdmin': user.id==1, 'isCritic': user.isCritic}\n except Exception as e:\n return {'loggedIn': False, 'loginerror': \"Unknown Error\",'isAdmin': False, 'isCritic': False}\n\n@app.route('/registerUser', methods=['POST'])\ndef registerUser():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n password = data['password'].strip()\n name = data['name'].strip()\n gender = data['gender'].strip()\n dob = data['dob'].strip()\n dtsObj = dts.strptime(dob,'%Y-%m-%d')\n user = User.query.filter_by(username = username).first()\n if user:\n return {'loggedIn': False, 'registererror': \"Username already exists\"}\n newuser = User(username,generate_password_hash(password,method='sha256'),name,gender,dtsObj)\n db.session.add(newuser)\n tx = graph.begin()\n statement = \"CREATE (a:User {username: $username, name: $name, gender: $gender, dob: $dob})\"\n tx.run(statement, {'username': username, 'name': name, 'gender': gender, 'dob': dob})\n db.session.commit()\n tx.commit()\n login_user(newuser, remember=False)\n return {'loggedIn': True, 'registererror': \"NA\"}\n except Exception as e:\n return {'loggedIn': False, 'registererror': \"Unknown Error\"}\n\n@app.route('/logoutUser', methods=['POST'])\n@login_required\ndef logoutUser():\n if request.method == 'POST':\n try:\n logout_user()\n return {'loggedOut': True, 'logoutError': \"NA\"}\n except Exception as e:\n return {'loggedOut': False, 'logoutError': \"Unknown Error\"}\n\n@app.route('/getUserDetails',methods=['GET'])\ndef getUserDetails():\n if current_user.is_authenticated:\n return {'isUserLoggedIn': True, 'username': current_user.name, 'isAdmin': current_user.id==1, 'isCritic': current_user.isCritic}\n else:\n return {'isUserLoggedIn': False, 'username': \"\", 'isAdmin': False, 'isCritic': False}\n\n@app.route('/getMovieList',methods=['POST'])\n@login_required\ndef getMovieList():\n if current_user.is_authenticated and request.method=='POST':\n data = json.loads(request.data)\n searchText = data[\"searchText\"].strip().lower()\n searchOption = data[\"searchOption\"].strip()\n currentPage = data[\"currentPage\"]\n skipValue = (currentPage-1)*offset\n tx = graph.begin()\n if(searchOption==\"Title\"):\n statement = \"MATCH (m:Movies),(u:User {username: $username}) where toLower(m.title) contains $searchText and not ((m)<-[:rated]-(u)) CALL { WITH m,u OPTIONAL MATCH (m)-[:is_genre]->(g:Genres)<-[:likedGenre]-(u) return (coalesce(count(g),0)+1) as score1 } CALL { WITH m,u OPTIONAL MATCH (m)<--(b:Celebrity)<-[:favorite]-(u) return (coalesce(count(b),0)+1) as score2} return m,score1*score2*(m.avg_rating) as score ORDER BY score DESC SKIP $skipValue LIMIT $offset;\"\n statement1 = \"MATCH (m:Movies), (u:User {username: $username}) WHERE toLower(m.title) contains $searchText and not ((m)<-[:rated]-(u)) RETURN count(m);\"\n elif(searchOption==\"Actor\"):\n statement = \"MATCH (a:Celebrity)-[:acted_in]->(m:Movies),(u:User {username: $username}) where toLower(a.name) contains $searchText and not ((m)<-[:rated]-(u)) CALL { WITH m,u OPTIONAL MATCH (m)-[:is_genre]->(g:Genres)<-[:likedGenre]-(u) return (coalesce(count(g),0)+1) as score1 } CALL { WITH m,u OPTIONAL MATCH (m)<--(b:Celebrity)<-[:favorite]-(u) return (coalesce(count(b),0)+1) as score2} return m,score1*score2*(m.avg_rating) as score ORDER BY score DESC SKIP $skipValue LIMIT $offset;\"\n statement1 = \"MATCH (a:Celebrity)-[:acted_in]->(m:Movies),(u:User 
{username: $username}) where toLower(a.name) contains $searchText and not ((m)<-[:rated]-(u)) RETURN count(m);\"\n elif(searchOption==\"Director\"):\n statement = \"MATCH (a:Celebrity)-[:directed]->(m:Movies),(u:User {username: $username}) where toLower(a.name) contains $searchText and not ((m)<-[:rated]-(u)) CALL { WITH m,u OPTIONAL MATCH (m)-[:is_genre]->(g:Genres)<-[:likedGenre]-(u) return (coalesce(count(g),0)+1) as score1 } CALL { WITH m,u OPTIONAL MATCH (m)<--(b:Celebrity)<-[:favorite]-(u) return (coalesce(count(b),0)+1) as score2} return m,score1*score2*(m.avg_rating) as score ORDER BY score DESC SKIP $skipValue LIMIT $offset;\"\n statement1 = \"MATCH (a:Celebrity)-[:directed]->(m:Movies),(u:User {username: $username}) where toLower(a.name) contains $searchText and not ((m)<-[:rated]-(u)) RETURN count(m);\"\n elif(searchOption==\"Year\"):\n statement = \"MATCH (m:Movies),(u:User {username: $username}) where m.year_released = $searchText and not ((m)<-[:rated]-(u)) CALL { WITH m,u OPTIONAL MATCH (m)-[:is_genre]->(g:Genres)<-[:likedGenre]-(u) return (coalesce(count(g),0)+1) as score1 } CALL { WITH m,u OPTIONAL MATCH (m)<--(b:Celebrity)<-[:favorite]-(u) return (coalesce(count(b),0)+1) as score2} return m,score1*score2*(m.avg_rating) as score ORDER BY score DESC SKIP $skipValue LIMIT $offset;\"\n statement1 = \"MATCH (m:Movies),(u:User {username: $username}) where m.year_released = $searchText and not ((m)<-[:rated]-(u)) RETURN count(m);\"\n searchText = int(searchText)\n movieList = tx.run(statement, {'username': current_user.username, 'searchText': searchText, 'offset': offset, 'skipValue': skipValue}).data()\n numMovies = tx.run(statement1, {'username': current_user.username,'searchText': searchText}).data()[0]['count(m)']\n maxPageNumber = numMovies//offset if numMovies%offset==0 else 1 + numMovies//offset\n\n ans = []\n for x in movieList:\n y = x['m']\n movieEntry = {}\n movieEntry['id'] = y['movie_id']\n movieEntry['title'] = y['title']\n movieEntry['year'] = y['year_released']\n movieEntry['rating'] = round(y['avg_rating'],2)\n statement = \"MATCH (m:Movies {movie_id: $movie_id})-[:is_genre]->(g:Genres) return g;\"\n genres = tx.run(statement, {'movie_id': y['movie_id']}).data()\n genreList = [g['g']['name'] for g in genres]\n # movieEntry['genre'] = \", \".join(genreList)\n movieEntry['genreList'] = genreList\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:acted_in]-(g:Celebrity) return g;\"\n actors = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['actors'] = [g['g']['name'] for g in actors]\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:directed]-(g:Celebrity) return g;\"\n director = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['director'] = director[0]['g']['name'] if (len(director)>0) else \"\"\n ans.append(movieEntry)\n return {'movieList': ans, 'totalPages': maxPageNumber, 'success': True, 'error': \"NA\"}\n\n@app.route('/getMovieListCritic',methods=['POST'])\n@login_required\ndef getMovieListCritic():\n if current_user.is_authenticated:\n data = json.loads(request.data)\n searchText = data[\"searchText\"].strip().lower()\n searchOption = data[\"searchOption\"].strip()\n currentPage = data[\"currentPage\"]\n skipValue = (currentPage-1)*offset\n tx = graph.begin()\n if(searchOption==\"Title\"):\n statement = \"MATCH (m:Movies) where toLower(m.title) contains $searchText return m SKIP $skipValue LIMIT $offset;\"\n statement1 = \"MATCH (m:Movies) WHERE toLower(m.title) contains $searchText RETURN count(m);\"\n 
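# The ranking in getMovieList above is computed entirely in Cypher:
# score = (liked-genre overlaps + 1) * (favourite-celebrity overlaps + 1) * avg_rating,
# so a movie with no overlap still ranks by its average rating, and pages are cut with
# SKIP/LIMIT plus a ceiling division on the total count. A plain-Python sketch of the
# same arithmetic, handy for sanity-checking the query (hypothetical helpers, not used
# by the app):
#
#     import math
#     def personalized_score(genre_overlap, celeb_overlap, avg_rating):
#         return (genre_overlap + 1) * (celeb_overlap + 1) * avg_rating
#     def page_count(num_movies, offset=50):
#         # same result as: num_movies//offset if num_movies%offset==0 else 1 + num_movies//offset
#         return math.ceil(num_movies / offset)
#     assert personalized_score(0, 0, 4.2) == 4.2   # no overlaps: plain average rating
#     assert page_count(100) == 2 and page_count(101) == 3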
elif(searchOption==\"Actor\"):\n statement = \"MATCH (m:Movies)<-[:acted_in]-(a:Celebrity) where toLower(a.name) contains $searchText return m SKIP $skipValue LIMIT $offset; \"\n statement1 = \"MATCH (m:Movies)<-[:acted_in]-(a:Celebrity) where toLower(a.name) contains $searchText RETURN count(m);\"\n elif(searchOption==\"Director\"):\n statement = \"MATCH (m:Movies)<-[:directed]-(a:Celebrity) where toLower(a.name) contains $searchText return m SKIP $skipValue LIMIT $offset; \"\n statement1 = \"MATCH (m:Movies)<-[:directed]-(a:Celebrity) where toLower(a.name) contains $searchText RETURN count(m);\"\n elif(searchOption==\"Year\"):\n statement = \"MATCH (m:Movies) where m.year_released = $searchText return m SKIP $skipValue LIMIT $offset;\"\n statement1 = \"MATCH (m:Movies) where m.year_released = $searchText RETURN count(m);\"\n searchText = int(searchText)\n movieList = tx.run(statement, {'searchText': searchText, 'offset': offset, 'skipValue': skipValue}).data()\n numMovies = tx.run(statement1, {'searchText': searchText}).data()[0]['count(m)']\n maxPageNumber = numMovies//offset if numMovies%offset==0 else 1 + numMovies//offset\n ans = []\n for x in movieList:\n y = x['m']\n movieEntry = {}\n movieEntry['id'] = y['movie_id']\n movieEntry['title'] = y['title']\n movieEntry['year'] = y['year_released']\n movieEntry['rating'] = round(y['avg_rating'],2)\n statement = \"MATCH (m:Movies {movie_id: $movie_id})-[:is_genre]->(g:Genres) return g;\"\n genres = tx.run(statement, {'movie_id': y['movie_id']}).data()\n genreList = [g['g']['name'] for g in genres]\n # movieEntry['genre'] = \", \".join(genreList)\n movieEntry['genreList'] = genreList\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:acted_in]-(g:Celebrity) return g;\"\n actors = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['actors'] = [g['g']['name'] for g in actors]\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:directed]-(g:Celebrity) return g;\"\n director = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['director'] = director[0]['g']['name'] if (len(director)>0) else \"\"\n ans.append(movieEntry)\n return {'movieList': ans, 'totalPages': maxPageNumber, 'success': True, 'error': \"NA\"}\n\n@app.route('/getWatchHistory',methods=['GET'])\n@login_required\ndef getWatchHistory():\n if current_user.is_authenticated:\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $username})-[:rated]->(m:Movies) return m;\"\n movieList = tx.run(statement, {'username': current_user.username}).data()\n ans = []\n for x in movieList:\n y = x['m']\n movieEntry = {}\n movieEntry['id'] = y['movie_id']\n movieEntry['title'] = y['title']\n movieEntry['year'] = y['year_released']\n movieEntry['rating'] = round(y['avg_rating'],2)\n statement = \"MATCH (m:Movies {movie_id: $movie_id})-[:is_genre]->(g:Genres) return g;\"\n genres = tx.run(statement, {'movie_id': y['movie_id']}).data()\n genreList = [g['g']['name'] for g in genres]\n # movieEntry['genre'] = \", \".join(genreList)\n movieEntry['genreList'] = genreList\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:acted_in]-(g:Celebrity) return g;\"\n actors = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['actors'] = [g['g']['name'] for g in actors]\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:directed]-(g:Celebrity) return g;\"\n director = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['director'] = director[0]['g']['name'] if (len(director)>0) else \"\"\n 
ans.append(movieEntry)\n return {'watchHistory': ans}\n\ndef getTempFriendRecs():\n friendRecs = []\n for i in range(1,11):\n name = f\"Friend{i}\"\n friendEntry = {}\n friendEntry[\"name\"] = name\n movies = []\n for j in range(1,4):\n movieEntry = {}\n movieEntry['id'] = j\n movieEntry['title'] = f\"Movie{j}\"\n movieEntry['year'] = 2000 + j\n movieEntry['rating'] = 4.43\n movieEntry['duration'] = 120\n movieEntry['genreList'] = [\"Action\", \"Horror\"]\n movieEntry['actors'] = [\"Actor1\", \"Actor2\"]\n movieEntry['director'] = \"Director\"\n movieEntry['reviews'] = []\n movieEntry['numUsers'] = 100\n movies.append(movieEntry)\n friendEntry[\"movies\"] = movies\n friendRecs.append(friendEntry)\n return friendRecs\n@app.route('/getFriendRecommendations',methods=['GET'])\n@login_required\ndef getFriendRecommendations():\n if current_user.is_authenticated:\n tx = graph.begin()\n statement = \"MATCH (a:Recommendation {friend2: $username})-[:recommending_user]->(b:User) return b;\"\n friendList = tx.run(statement, {'username': current_user.username}).data()\n friendRecs = []\n for friendTemp in friendList:\n friend = friendTemp['b']\n friendEntry = {}\n friendEntry[\"name\"] = friend['username']\n statement = \"MATCH (a:Recommendation {friend1: $friendUsername, friend2: $username})-[:movie_recommended]->(m:Movies) return m;\"\n movies = tx.run(statement, {'username': current_user.username, 'friendUsername': friend['username']}).data()\n if(len(movies)==0): \n continue\n friendEntry[\"movies\"] = []\n for j in movies:\n movie = j['m']\n movieEntry = {}\n movieEntry['id'] = movie['movie_id']\n movieEntry['title'] = movie['title']\n movieEntry['year'] = movie['year_released']\n movieEntry['rating'] = round(movie['avg_rating'],2)\n movieEntry['duration'] = movie['duration']\n statement = \"MATCH (m:Movies {movie_id: $movie_id})-[:is_genre]->(g:Genres) return g;\"\n genres = tx.run(statement, {'movie_id': movie['movie_id']}).data()\n genreList = [g['g']['name'] for g in genres]\n # movieEntry['genre'] = \", \".join(genreList)\n movieEntry['genreList'] = genreList\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:acted_in]-(g:Celebrity) return g;\"\n actors = tx.run(statement, {'movie_id': movie['movie_id']}).data() \n movieEntry['actors'] = [g['g']['name'] for g in actors]\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:directed]-(g:Celebrity) return g;\"\n director = tx.run(statement, {'movie_id': movie['movie_id']}).data() \n movieEntry['director'] = director[0]['g']['name'] if (len(director)>0) else \"\"\n reviews = []\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[r:review]-(g:Critic) return g,r.review_text;\"\n reviewtemp = tx.run(statement, {'movie_id': movie['movie_id']}).data()\n for i in reviewtemp:\n reviewDic = {}\n reviewDic['reviewedBy'] = i['g']['name']\n reviewDic['content'] = i['r.review_text']\n reviews.append(reviewDic)\n movieEntry['reviews'] = reviews\n movieEntry['numUsers'] = movie['no_user_ratings']\n friendEntry[\"movies\"].append(movieEntry)\n friendRecs.append(friendEntry)\n # friendRecs = getTempFriendRecs()\n return {\"friendRecs\":friendRecs}\n\n@app.route('/getFriendList',methods=['GET'])\n@login_required\ndef getFriendList():\n if current_user.is_authenticated:\n try:\n tx=graph.begin()\n statement = \"MATCH (p:User {username: $username})-[:friend]-(q:User) return q\"\n friendList = tx.run(statement, {'username': current_user.username}).data()\n friendList = [x['q'] for x in friendList]\n # friendList = [{\"username\":f\"Friend{i}\"} for 
i in range(1,5)]\n return {\"friendList\": friendList, \"error\":\"NA\"}\n except Exception as e:\n return {\"friendList\":[], \"error\": \"Unknown Error\"}\n\n@app.route('/getFavouriteCelebrities',methods=['GET'])\n@login_required\ndef getFavouriteCelebrities():\n if current_user.is_authenticated:\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $username})-[:favorite]->(c:Celebrity) return c\"\n celebList = tx.run(statement,{'username': current_user.username}).data()\n celebList = [x['c'] for x in celebList]\n # print(celebList)\n return {\"favCelebsList\": celebList}\n\n\n@app.route('/getAllGenres',methods=['GET'])\n@login_required\ndef getAllGenres():\n if current_user.is_authenticated:\n tf = graph.begin()\n statement = \"MATCH (p:Genres) return p\"\n x = tf.run(statement).data()\n genreList = [i['p'] for i in x]\n i = 0\n for x in genreList:\n x['color'] = colorDic[x['name']]\n # print(f\"'{x['name']}': '{x['color']}',\")\n i+=1\n return {'genreList': genreList}\n\n@app.route('/getLikedGenres',methods=['GET'])\n@login_required\ndef getLikedGenres():\n if current_user.is_authenticated:\n tf = graph.begin()\n statement = \"MATCH (p:User {username: $username })-[:likedGenre]->(g:Genres) return g\"\n x = tf.run(statement, {'username': current_user.username}).data()\n liked = [i['g']['name'] for i in x]\n return {'likedGenres': liked}\n\n@app.route('/saveGenres', methods=['POST'])\n@login_required\ndef saveGenres():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n tx = graph.begin()\n statement = \"MATCH (a:User {username: $username})-[r:likedGenre]->(b:Genres) DELETE r;\"\n tx.run(statement, {'username': current_user.username})\n for x in data['likedGenres']:\n statement = \"MATCH (a:User {username: $username}),(b:Genres {name: $genrename}) MERGE (a)-[r:likedGenre]->(b) RETURN r;\"\n tx.run(statement, {'username': current_user.username, 'genrename': x})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/getFriendRequests',methods=['GET'])\n@login_required\ndef getFriendRequests():\n if current_user.is_authenticated:\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $username})<-[:request]-(q:User) return q\"\n reqList = tx.run(statement, {'username': current_user.username}).data()\n reqList = [x['q'] for x in reqList]\n for x in reqList:\n statement = \"MATCH (p:User {username: $username })-[:likedGenre]->(g:Genres) return g\"\n genresList = tx.run(statement, {'username': x['username']}).data()\n genresList = [y['g']['name'] for y in genresList]\n x['likedGenres'] = \", \".join(genresList)\n\n return {\"requestQueue\": reqList}\n\n@app.route('/getAllUsers',methods=['GET'])\n@login_required\ndef getAllUsers():\n if current_user.is_authenticated:\n tx = graph.begin()\n statement = \"MATCH (p:User { username: $username}),(q:User) where not ((p)-[:friend]-(q)) and q.username <> $username return q;\"\n reqList = tx.run(statement, {'username': current_user.username}).data()\n reqList = [x['q'] for x in reqList]\n for x in reqList:\n statement = \"MATCH (p:User {username: $username })-[:likedGenre]->(g:Genres) return g\"\n genresList = tx.run(statement, {'username': x['username']}).data()\n genresList = [y['g']['name'] for y in genresList]\n x['likedGenres'] = \", \".join(genresList)\n\n return {\"userList\": reqList}\n\n@app.route('/removeFriend', methods=['POST'])\n@login_required\ndef removeFriend():\n if request.method == 'POST':\n try:\n 
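# Recommendations are reified as nodes rather than plain edges: a
# (:Recommendation {friend1: <recommender>, friend2: <recipient>}) node carries a
# [:recommending_user] edge to the sender, a [:to_whom_recommended] edge to the
# receiver, and one [:movie_recommended] edge per suggested film. That is why
# unfriending below must detach-delete the Recommendation nodes between the two
# users as well as the [:friend] edge. The shape, in illustrative Cypher:
#
#     (u1:User)<-[:recommending_user]-(rec:Recommendation {friend1: u1.username, friend2: u2.username})
#     (rec)-[:to_whom_recommended]->(u2:User)
#     (rec)-[:movie_recommended]->(m:Movies)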
data = json.loads(request.data)\n username = data['username'].strip()\n tx=graph.begin()\n statement = \"MATCH (p:User {username: $username})-[r:friend]-(q:User {username: $friendUsername}) delete r;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n statement = \"MATCH (r:Recommendation) where (r.friend1 = $friendUsername and r.friend2 = $username) or (r.friend2 = $friendUsername and r.friend1 = $username) detach delete r\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/removeFavoriteCelebrity', methods=['POST'])\n@login_required\ndef removeFavoriteCelebrity():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n name = data['name'].strip()\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $username})-[r:favorite]->(c:Celebrity {name: $celebName}) delete r;\"\n tx.run(statement,{'username': current_user.username, 'celebName': name})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/sendRequestToUser', methods=['POST'])\n@login_required\ndef sendRequestToUser():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $username}),(q:User {username: $friendUsername}) MERGE (p)-[r:request]->(q) return r;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n tx.commit()\n return {'requestSent': True, 'error': \"NA\"}\n except Exception as e:\n return {'requestSent': False, 'error': \"Unknown Error\"}\n\n@app.route('/addCritic', methods=['POST'])\n@login_required\ndef addCritic():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n password = data['password'].strip()\n name = data['name'].strip()\n gender = data['gender'].strip()\n dob = data['dob'].strip()\n dtsObj = dts.strptime(dob,'%Y-%m-%d')\n user = User.query.filter_by(username = username).first()\n if user:\n return {'success': False, 'error': \"Username already exists\"}\n newCritic = User(username,generate_password_hash(password,method='sha256'),name,gender,dtsObj,True)\n db.session.add(newCritic)\n tx = graph.begin()\n statement = \"MERGE (p:Critic {username: $username,name: $name,gender: $gender,dob: $dob})\"\n tx.run(statement, {'username': username, 'name': name, 'gender': gender, 'dob': dob})\n db.session.commit()\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/getAllCritics', methods=['GET'])\n@login_required\ndef getAllCritics():\n if current_user.is_authenticated:\n criticList = [critic.username for critic in User.query.filter_by(isCritic = True)]\n # criticList = [f\"Critic{i}\" for i in range(1,21)]\n return {\"criticList\": criticList, \"error\": \"\"}\n\n@app.route('/removeCritic', methods=['POST'])\n@login_required\ndef removeCritic():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n user = User.query.filter_by(username = username).first()\n if not user:\n return {'success': False, 'error': \"Username does not exist\"}\n elif not user.isCritic:\n return {'success': False, 'error': 
\"Given username is not a Critic\"}\n tx = graph.begin()\n statement = \"MATCH (p:Critic {username: $username})-[r:review]->(m:Movies) return r,m.movie_id\"\n result = tx.run(statement, {'username': current_user.username}).data()\n for x in result:\n oldRating = x['r']['rating']\n statement = \"MATCH (m:Movies {movie_id: $movie_id}) SET m.avg_rating = 1.0*(m.avg_rating*(m.no_user_ratings+$critic_weight*m.no_critic_ratings)- $critic_weight*$oldRating)/(m.no_user_ratings+$critic_weight*(m.no_critic_ratings-1)),m.no_critic_ratings = m.no_critic_ratings-1 return m\"\n tx.run(statement, {'movie_id': x['m.movie_id'], 'critic_weight': critic_weight, 'oldRating': oldRating})\n statement = \"MATCH (p:Critic {username: $username}) detach delete p \"\n tx.run(statement, {'username': username})\n db.session.delete(user)\n db.session.commit()\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/addFriend', methods=['POST'])\n@login_required\ndef addFriend():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $friendUsername}),(q:User {username: $username}) CREATE (p)-[r:friend]->(q) return r;\"\n tx.run(statement,{'friendUsername': username, 'username': current_user.username})\n statement = \"MATCH (p:User {username: $friendUsername})-[r:request]-(q:User {username: $username}) DELETE r;\"\n tx.run(statement,{'friendUsername': username, 'username': current_user.username})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/deleteFriendRequest', methods=['POST'])\n@login_required\ndef deleteFriendRequest():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data['username'].strip()\n tx = graph.begin()\n statement = \"MATCH (p:User {username: $friendUsername})-[r:request]->(q:User {username: $username}) DELETE r;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/recommendFriends', methods=['POST'])\n@login_required\ndef recommendFriends():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n tx = graph.begin()\n movie_id = data['movie_id']\n for x in data['friendList']:\n statement = \"MERGE (a:Recommendation {friend1: $username, friend2: $friendUsername})\"\n tx.run(statement, {'friendUsername': x, 'username': current_user.username})\n statement = \"MATCH (a:User {username: $username}),(b:Recommendation {friend1: $username, friend2: $friendUsername}),(c:User {username: $friendUsername}),(d:Movies {movie_id: $movie_id}) MERGE (b)-[r:recommending_user]->(a) MERGE (b)-[q:to_whom_recommended]->(c) MERGE (b)-[p:movie_recommended]->(d) RETURN r;\"\n tx.run(statement, {'friendUsername': x,'username': current_user.username, 'movie_id': movie_id})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/markFavouriteDirector', methods=['POST'])\n@login_required\ndef markFavouriteDirector():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n directorName = data['directorName'].strip()\n tx = graph.begin()\n statement = \"MATCH 
(p:User {username: $username}),(c:Celebrity {name: $directorName}) MERGE (p)-[r:favorite]->(c) return r\"\n tx.run(statement,{'username': current_user.username, 'directorName': directorName})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/markFavouriteActors', methods=['POST'])\n@login_required\ndef markFavouriteActors():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n actorList = data['actorList']\n tx = graph.begin()\n for x in actorList:\n x = x.strip()\n statement = \"MATCH (p:User {username: $username}),(c:Celebrity {name: $actorname}) MERGE (p)-[r:favorite]->(c) return r\"\n tx.run(statement,{'username': current_user.username, 'actorname': x})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/getAllNotifications',methods=['GET'])\n@login_required\ndef getAllNotifications():\n if current_user.is_authenticated:\n tx = graph.begin()\n statement = \"MATCH (a:LikedRecommendation {friend2: $username})-[:movie_recommended]->(m:Movies) return a,a.friend1,m\"\n data = tx.run(statement, {'username': current_user.username}).data()\n nlist = []\n for i in data:\n d ={}\n d['username'] = i['a.friend1']\n d['movie_id'] = i['m']['movie_id']\n d['text'] = f\"{i['m']['title']}: {d['username']} liked your recommendation\"\n nlist.append(d)\n return {\"notificationList\": nlist}\n\n@app.route('/sendLikedRecommendation', methods=['POST'])\n@login_required\ndef sendLikedRecommendation():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data[\"username\"].strip()\n movie_id = data[\"movie_id\"]\n tx = graph.begin()\n statement = \"MERGE (a:LikedRecommendation {friend1: $username, friend2: $friendUsername})\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n statement = \"MATCH (a:User {username: $username}),(b:LikedRecommendation {friend1: $username, friend2: $friendUsername}),(c:User {username: $friendUsername}),(d:Movies {movie_id: $movie_id}) MERGE (b)-[r:recommending_user]->(c) MERGE (b)-[q:to_whom_recommended]->(a) MERGE (b)-[p:movie_recommended]->(d) RETURN r;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username, 'movie_id': movie_id})\n statement = \"MATCH (b:Recommendation {friend2: $username, friend1: $friendUsername}),(d:Movies {movie_id: $movie_id}),(b)-[p:movie_recommended]->(d) DELETE p;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username, 'movie_id': movie_id})\n statement = \"MATCH (b:Recommendation {friend2: $username, friend1: $friendUsername})-[:movie_recommended]->(d:Movies) return count(d);\"\n value = tx.run(statement, {'username': current_user.username, 'friendUsername': username}).data()\n value = value[0]['count(d)']\n if(value==0):\n statement = \"MATCH (b:Recommendation {friend2: $username, friend1: $friendUsername}) detach delete b\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/removeRecommendation', methods=['POST'])\n@login_required\ndef removeRecommendation():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data[\"username\"].strip()\n movie_id = data[\"movie_id\"]\n tx = 
graph.begin()\n statement = \"MATCH (b:Recommendation {friend2: $username, friend1: $friendUsername}),(d:Movies {movie_id: $movie_id}),(b)-[p:movie_recommended]->(d) DELETE p;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username, 'movie_id': movie_id})\n statement = \"MATCH (b:Recommendation {friend2: $username, friend1: $friendUsername})-[:movie_recommended]->(d:Movies) return count(d);\"\n value = tx.run(statement, {'username': current_user.username, 'friendUsername': username}).data()\n value = value[0]['count(d)']\n if(value==0):\n statement = \"MATCH (b:Recommendation {friend2: $username, friend1: $friendUsername}) detach delete b\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/removeNotification', methods=['POST'])\n@login_required\ndef removeNotification():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n username = data[\"username\"].strip()\n movie_id = data[\"movie_id\"]\n tx = graph.begin()\n statement = \"MATCH (b:LikedRecommendation {friend2: $username, friend1: $friendUsername}),(d:Movies {movie_id: $movie_id}),(b)-[p:movie_recommended]->(d) DELETE p;\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username, 'movie_id': movie_id})\n statement = \"MATCH (b:LikedRecommendation {friend2: $username, friend1: $friendUsername})-[:movie_recommended]->(d:Movies) return count(d);\"\n value = tx.run(statement, {'username': current_user.username, 'friendUsername': username}).data()\n value = value[0]['count(d)']\n if(value==0):\n statement = \"MATCH (b:LikedRecommendation {friend2: $username, friend1: $friendUsername}) detach delete b\"\n tx.run(statement, {'username': current_user.username, 'friendUsername': username})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/rateMovie',methods=['POST'])\n@login_required\ndef rateMovie():\n if request.method == 'POST':\n try: \n data = json.loads(request.data)\n rating = data['rating']\n movie_id = data['movie_id']\n tx = graph.begin()\n if(current_user.isCritic):\n statement = \"MATCH (p:Critic {username: $username})-[r:review]->(m:Movies {movie_id: $movie_id}) return r \"\n result = tx.run(statement, {'username': current_user.username, 'movie_id': movie_id}).data()\n exists = len(result)>0\n if(exists):\n oldRating = result[0]['r']['rating']\n statement = \"MATCH (p:Critic {username: $username})-[r:review]->(m:Movies {movie_id: $movie_id}) SET r.rating = $rating return r \"\n tx.run(statement, {'username': current_user.username, 'movie_id': movie_id, 'rating': rating})\n statement = \"MATCH (m:Movies {movie_id: $movie_id}) SET m.avg_rating = m.avg_rating- $critic_weight*($oldRating-$rating)/(m.no_user_ratings+$critic_weight*m.no_critic_ratings) return m\"\n tx.run(statement, {'movie_id': movie_id,'critic_weight': critic_weight, 'oldRating': oldRating, 'rating': rating})\n else:\n statement = \"MATCH (p:Critic {username: $username}),(m:Movies {movie_id: $movie_id}) MERGE (p)-[r:review {rating: $rating }]->(m) return r \"\n tx.run(statement, {'username': current_user.username, 'movie_id': movie_id, 'rating': rating})\n statement = \"MATCH (m:Movies {movie_id: $movie_id}) SET m.avg_rating = 
1.0*(m.avg_rating*(m.no_user_ratings+$critic_weight*m.no_critic_ratings)+$critic_weight*$rating)/($critic_weight+ m.no_user_ratings+$critic_weight*m.no_critic_ratings),m.no_critic_ratings = 1+m.no_critic_ratings return m\"\n                    tx.run(statement, {'movie_id': movie_id,'critic_weight': critic_weight, 'rating': rating})\n            else:\n                statement = \"MATCH (p:User {username: $username})-[r:rated]->(m:Movies {movie_id: $movie_id}) return r \"\n                result = tx.run(statement, {'username': current_user.username, 'movie_id': movie_id}).data()\n                exists = len(result)>0\n                if(exists):\n                    oldRating = result[0]['r']['rating']\n                    statement = \"MATCH (p:User {username: $username})-[r:rated]->(m:Movies {movie_id: $movie_id}) SET r.rating = $rating return r \"\n                    tx.run(statement, {'username': current_user.username, 'movie_id': movie_id, 'rating': rating})\n                    statement = \"MATCH (m:Movies {movie_id: $movie_id}) SET m.avg_rating = m.avg_rating- 1.0*($oldRating-$rating)/(m.no_user_ratings+$critic_weight*m.no_critic_ratings) return m\"\n                    tx.run(statement, {'movie_id': movie_id,'critic_weight': critic_weight, 'oldRating': oldRating, 'rating': rating})\n                else:\n                    statement = \"MATCH (p:User {username: $username}),(m:Movies {movie_id: $movie_id}) MERGE (p)-[r:rated {rating: $rating }]->(m) return r \"\n                    tx.run(statement, {'username': current_user.username, 'movie_id': movie_id, 'rating': rating})\n                    statement = \"MATCH (m:Movies {movie_id: $movie_id}) SET m.avg_rating = 1.0*(m.avg_rating*(m.no_user_ratings+$critic_weight*m.no_critic_ratings)+$rating)/(1+ m.no_user_ratings+$critic_weight*m.no_critic_ratings),m.no_user_ratings = 1+m.no_user_ratings return m\"\n                    tx.run(statement, {'movie_id': movie_id,'critic_weight': critic_weight, 'rating': rating})\n            tx.commit()\n            return {'success': True, 'error': \"NA\"}\n        except Exception as e:\n            return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/getMovieRating',methods=['POST'])\n@login_required\ndef getMovieRating():\n    if current_user.is_authenticated and request.method=='POST':\n        try:\n            data = json.loads(request.data)\n            movie_id = data['movie_id']\n            tx = graph.begin()\n            rating = 0\n            review_text = \"\"\n            if(current_user.isCritic):\n                statement = \"MATCH (p:Critic {username: $username})-[r:review]->(m:Movies {movie_id: $movie_id}) return r\"\n                result = tx.run(statement, {'username': current_user.username, 'movie_id': movie_id}).data()\n                rating = result[0]['r']['rating'] if len(result)>0 else 0\n                review_text = result[0]['r']['review_text'] if len(result)>0 else \"\"\n            else:\n                statement = \"MATCH (p:User {username: $username})-[r:rated]->(m:Movies {movie_id: $movie_id}) return r\"\n                result = tx.run(statement, {'username': current_user.username, 'movie_id': movie_id}).data()\n                rating = result[0]['r']['rating'] if len(result)>0 else 0\n            return {'rating': rating, 'review': review_text,'success': True, 'error': \"NA\"}\n        except Exception as e:\n            return {'rating': 0,'review': \"\",'success': False, 'error': \"Unknown Error1\"}\n\n@app.route('/getMovieDetails',methods=['POST'])\n@login_required\ndef getMovieDetails():\n    if current_user.is_authenticated and request.method=='POST':\n        try:\n            data = json.loads(request.data)\n            movie_id = data['movie_id']\n            tx = graph.begin()\n            statement = \"MATCH (m:Movies {movie_id: $movie_id}) return m;\"\n            x = tx.run(statement, {'movie_id': movie_id}).data()\n            y = x[0]['m']\n            movieEntry = {}\n            movieEntry['id'] = y['movie_id']\n            movieEntry['title'] = y['title']\n            movieEntry['year'] = y['year_released']\n            movieEntry['rating'] = round(y['avg_rating'],2)\n            
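# The avg_rating read here is the figure rateMovie above maintains incrementally, under
# the invariant avg = (sum_user + w*sum_critic) / (n_user + w*n_critic) with
# w = critic_weight = 5.0. A worked check of the two update rules against a
# from-scratch recompute (made-up numbers, standalone sketch):
#
#     w = 5.0
#     avg, n_u, n_c = 3.0, 2, 1            # e.g. user ratings [4, 2], critic ratings [3]
#     r = 5                                # a new critic rating arrives
#     new_avg = (avg*(n_u + w*n_c) + w*r) / (n_u + w*(n_c + 1))
#     assert abs(new_avg - (4 + 2 + w*(3 + 5)) / (n_u + w*2)) < 1e-9      # 46/12
#     old, new = 4, 1                      # a user edits an existing rating
#     chg_avg = avg - 1.0*(old - new) / (n_u + w*n_c)
#     assert abs(chg_avg - (1 + 2 + w*3) / (n_u + w*n_c)) < 1e-9          # 18/7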
movieEntry['duration'] = y['duration']\n statement = \"MATCH (m:Movies {movie_id: $movie_id})-[:is_genre]->(g:Genres) return g;\"\n genres = tx.run(statement, {'movie_id': y['movie_id']}).data()\n genreList = [[g['g']['name'], colorDic[g['g']['name']]] for g in genres]\n # movieEntry['genre'] = \", \".join(genreList)\n movieEntry['genreList'] = genreList\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:acted_in]-(g:Celebrity) return g;\"\n actors = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['actors'] = [g['g']['name'] for g in actors]\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[:directed]-(g:Celebrity) return g;\"\n director = tx.run(statement, {'movie_id': y['movie_id']}).data() \n movieEntry['director'] = director[0]['g']['name'] if (len(director)>0) else \"\"\n reviews = []\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[r:review]-(g:Critic) return g,r.review_text;\"\n reviewtemp = tx.run(statement, {'movie_id': y['movie_id']}).data()\n for i in reviewtemp:\n reviewDic = {}\n reviewDic['reviewedBy'] = i['g']['name']\n reviewDic['content'] = i['r.review_text']\n if reviewDic['content'] is not None:\n reviews.append(reviewDic)\n movieEntry['reviews'] = reviews\n movieEntry['numUsers'] = y['no_user_ratings']\n return {'movieDic': movieEntry,'success': True,'error': \"NA\"}\n except Exception as e:\n return {'movieDic': {},'success': False, 'error': \"Unknown Error2\"}\n\n@app.route('/addMovie', methods=['POST'])\n@login_required\ndef addMovie():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n title = data['title'].strip()\n year_released = data['year']\n url = data['url'].strip()\n rating = float(data['rating'])\n duration = data['duration']\n directorName = data['director'].strip()\n actorList = data['actorList']\n genreList = data['genreList']\n tx = graph.begin()\n statement = \"MATCH (m:Movies) return max(m.movie_id) as mid\"\n movie_id = tx.run(statement).data()\n movie_id = movie_id[0]['mid']+1\n statement = \"MERGE (m:Movies {movie_id: $movie_id,title: $title,year_released: $year_released,url: $url,avg_rating: $rating,no_user_ratings: 0,no_critic_ratings: 1,duration: $duration})\"\n tx.run(statement,{'movie_id': movie_id, 'title': title, 'year_released': year_released, 'url': url, 'rating': rating, 'duration': duration})\n if(current_user.isCritic):\n statement = \"MATCH (c:Critic {username: $username}),(m:Movies {movie_id: $movie_id}) MERGE (c)-[r:review {rating: $rating}]->(m) return r\"\n tx.run(statement, {'username': current_user.username,'movie_id': movie_id, 'rating': rating})\n statement = \"MATCH (c:Celebrity {name: $directorName}) return c\"\n results = tx.run(statement, {'directorName': directorName}).data()\n if len(results)==0:\n statement = \"MATCH (c:Celebrity) return max(c.id) as id\"\n id = tx.run(statement).data()\n id = id[0]['id']+1\n statement = \"MERGE (c:Celebrity {id: $id,name: $directorName, gender: 'Null', dob: 'Null'})\" \n tx.run(statement, {'id': id, 'directorName': directorName})\n statement = \"MATCH (c:Celebrity {name: $directorName}), (m:Movies {movie_id: $movie_id}) MERGE (c)-[r:directed]->(m) return r\"\n tx.run(statement, {'directorName': directorName, 'movie_id': movie_id})\n \n for actorName in actorList:\n actorName = actorName.strip()\n statement = \"MATCH (c:Celebrity {name: $actorName}) return c\"\n results = tx.run(statement, {'actorName': actorName}).data()\n if len(results)==0:\n statement = \"MATCH (c:Celebrity) return max(c.id) as id\"\n id = 
tx.run(statement).data()\n id = id[0]['id']+1\n statement = \"MERGE (c:Celebrity {id: $id,name: $actorName, gender: 'Null', dob: 'Null'})\" \n tx.run(statement, {'id': id, 'actorName': actorName})\n statement = \"MATCH (c:Celebrity {name: $actorName}), (m:Movies {movie_id: $movie_id}) MERGE (c)-[r:acted_in]->(m) return r\"\n tx.run(statement, {'actorName': actorName, 'movie_id': movie_id})\n\n for name in genreList:\n name = name.strip()\n statement = \"MATCH (c:Genres {name: $name}), (m:Movies {movie_id: $movie_id}) MERGE (c)<-[r:is_genre]-(m) return r\"\n tx.run(statement, {'name': name, 'movie_id': movie_id})\n\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/modifyGenres',methods=['POST'])\n@login_required\ndef modifyGenres():\n if current_user.is_authenticated and request.method=='POST':\n try:\n data = json.loads(request.data)\n movie_id = data['movie_id']\n genreList = data['genreList']\n tx =graph.begin()\n statement = \"MATCH (m:Movies {movie_id: $movie_id})-[r:is_genre]->(g:Genres) delete r\"\n tx.run(statement, {'movie_id': movie_id})\n for genre in genreList:\n genre = genre.strip()\n statement = \"MATCH (m:Movies {movie_id: $movie_id}),(g:Genres {name: $name}) MERGE (m)-[r:is_genre]->(g) return r\"\n tx.run(statement, {'movie_id': movie_id, 'name': genre})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/addReview',methods=['POST'])\n@login_required\ndef addReview():\n if current_user.is_authenticated and request.method=='POST':\n try:\n data = json.loads(request.data)\n movie_id = data['movie_id']\n review_text = data['reviewText']\n tx = graph.begin()\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[r:review]-(g:Critic {username: $username}) return count(r)\"\n result = tx.run(statement, {'movie_id': movie_id, 'username': current_user.username}).data()\n result = result[0]['count(r)']\n if(result>0):\n statement = \"MATCH (m:Movies {movie_id: $movie_id})<-[r:review]-(g:Critic {username: $username}) SET r.review_text = $review_text return r\"\n else:\n statement = \"MATCH (m:Movies {movie_id: $movie_id}),(g:Critic {username: $username}) MERGE (m)<-[r:review {rating: 0,review_text: $review_text}]-(g) return r\"\n tx.run(statement, {'movie_id': movie_id, 'username': current_user.username, 'review_text': review_text})\n tx.commit()\n return {'success': True, 'error': \"NA\"}\n except Exception as e:\n return {'success': False, 'error': \"Unknown Error\"}\n\n@app.route('/getAdminAnalytics',methods=['GET'])\n@login_required\ndef getAdminAnalytics():\n if current_user.is_authenticated:\n try:\n analyticsDic = {}\n analyticsDic['numUsers'] = User.query.filter_by(isCritic=False).count() - 1\n analyticsDic['numCritics'] = User.query.filter_by(isCritic=True).count() \n tx = graph.begin()\n analyticsDic['numMovies'] = tx.run(\"MATCH (m:Movies) return count(m) AS c;\").data()[0][\"c\"]\n analyticsDic['numRatings'] = tx.run(\"MATCH (u:User)-[r:rated]->(m:Movies) return count(r) AS c;\").data()[0][\"c\"]\n analyticsDic['numReviews'] = tx.run(\"MATCH (c:Critic)-[r:review]->(m:Movies) return count(r) AS c;\").data()[0][\"c\"]\n return {'error': False, 'analyticsDic': analyticsDic}\n except Exception as e:\n return {'error': True, 'analyticsDic': {}}\n\n# @app.route('/getRecommendations', methods=['GET'])\n# def getRecommendationsS1(user_id,threshold):\n# # In Strategy 1, the 
similarity between two users u1 and u2 is the proportion of movies they have in common\n# # The score of one given movie m is the proportion of users similar to u1 who rated m\n\n# query = (### Similarity normalization : count number of movies seen by u1 ###\n# # Count movies rated by u1 as countm\n# 'MATCH (u1:`User` {user_id:$user_id})-[:`Has_rated`]->(m1:`Movie`) '\n# 'WITH count(m1) as countm '\n# ### Score normalization : count number of users who are considered similar to u1 ###\n# # Retrieve all users u2 who share at least one movie with u1\n# 'MATCH (u1:`User` {user_id:$user_id})-[:`Has_rated`]->(m1:`Movie`) '\n# 'MATCH (m1)<-[r:`Has_rated`]-(u2:`User`) '\n# 'WHERE NOT u2=u1 '\n# # Compute similarity\n# 'WITH u2, countm, tofloat(count(r))/countm as sim '\n# # Keep users u2 whose similarity with u1 is above some threshold\n# 'WHERE sim>$threshold '\n# # Count number of similar users as countu\n# 'WITH count(u2) as countu, countm '\n# ### Recommendation ###\n# # Retrieve all users u2 who share at least one movie with u1\n# 'MATCH (u1:`User` {user_id:$user_id})-[:`Has_rated`]->(m1:`Movie`) '\n# 'MATCH (m1)<-[r:`Has_rated`]-(u2:`User`) '\n# 'WHERE NOT u2=u1 '\n# # Compute similarity\n# 'WITH u1, u2,countu, tofloat(count(r))/countm as sim '\n# # Keep users u2 whose similarity with u1 is above some threshold\n# 'WHERE sim>$threshold '\n# # Retrieve movies m that were rated by at least one similar user, but not by u1\n# 'MATCH (m:`Movie`)<-[r:`Has_rated`]-(u2) '\n# 'WHERE NOT (m)<-[:`Has_rated`]-(u1) '\n# # Compute score and return the list of suggestions ordered by score\n# 'RETURN DISTINCT m, tofloat(count(r))/countu as score ORDER BY score DESC LIMIT 20 ')\n# result = graph.run(query, {'user_id': user_id, 'threshold': threshold}).data()\n# return result", "sub_path": "api/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 53970, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 53, "usage_type": "call"}, {"api_name": "flask_login.UserMixin", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlalchemy.CheckConstraint", "line_number": 59, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 72, "usage_type": "call"}, {"api_name": "py2neo.Graph", "line_number": 76, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 84, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 85, "usage_type": "name"}, {"api_name": "flask_login.current_user.isCritic", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request.method", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "werkzeug.security.check_password_hash", "line_number": 99, "usage_type": "call"}, {"api_name": "flask_login.login_user", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 109, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 117, "usage_type": "name"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 121, "usage_type": "call"}, {"api_name": "flask_login.login_user", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 136, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 138, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 134, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 145, "usage_type": "name"}, {"api_name": "flask_login.current_user.name", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 146, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.isCritic", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 173, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 174, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 174, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 151, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 202, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 202, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 203, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 203, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 203, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 200, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 250, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 250, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 253, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 253, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 248, "usage_type": "name"}, {"api_name": 
"flask_login.current_user.is_authenticated", "line_number": 302, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 302, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 305, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 305, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 312, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 312, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 300, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 353, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 353, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 357, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 357, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 351, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 367, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 367, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 370, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 370, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 365, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 379, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 379, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 377, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 394, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 394, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 397, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 397, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 392, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 404, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 404, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 406, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 406, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 406, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 409, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 409, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 412, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 412, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 402, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 421, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 421, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 424, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 424, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 419, 
"usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 437, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 437, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 440, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 440, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 435, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 453, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 453, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 455, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 455, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 455, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 459, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 459, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 461, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 461, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 451, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 470, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 470, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 472, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 472, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 472, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 476, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 476, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 468, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 485, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 485, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 487, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 487, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 487, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 491, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 491, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 483, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 500, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 500, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 502, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 502, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 502, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 508, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 508, "usage_type": "name"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 512, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 498, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 526, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 526, "usage_type": "name"}, {"api_name": 
"flask_login.login_required", "line_number": 524, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 534, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 534, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 536, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 536, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 536, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 545, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 545, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 532, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 562, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 562, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 564, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 564, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 564, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 568, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 568, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 570, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 570, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 560, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 579, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 579, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 581, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 581, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 581, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 585, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 585, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 577, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 594, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 594, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 596, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 596, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 596, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 601, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 601, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 603, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 603, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 592, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 612, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 612, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 614, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 614, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 614, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 618, "usage_type": "attribute"}, {"api_name": 
"flask_login.current_user", "line_number": 618, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 610, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 627, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 627, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 629, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 629, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 629, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 635, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 635, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 625, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 644, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 644, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 647, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 647, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 642, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 660, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 660, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 662, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 662, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 662, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 667, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 667, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 669, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 669, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 671, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 671, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 673, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 673, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 677, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 677, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 658, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 686, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 686, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 688, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 688, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 688, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 693, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 693, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 695, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 695, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 699, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 699, 
"usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 684, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 708, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 708, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 710, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 710, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 710, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 715, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 715, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 717, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 717, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 721, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 721, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 706, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 730, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 730, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 732, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 732, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 732, "usage_type": "name"}, {"api_name": "flask_login.current_user.isCritic", "line_number": 736, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 736, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 738, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 738, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 743, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 743, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 748, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 748, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 753, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 753, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 758, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 758, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 763, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 763, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 728, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 774, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 774, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 774, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 774, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 776, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 776, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 776, "usage_type": "name"}, {"api_name": "flask_login.current_user.isCritic", "line_number": 781, "usage_type": "attribute"}, {"api_name": 
"flask_login.current_user", "line_number": 781, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 783, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 783, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 788, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 788, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 772, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 797, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 797, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 797, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 797, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 799, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 799, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 799, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 795, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 840, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 840, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 842, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 842, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 842, "usage_type": "name"}, {"api_name": "flask_login.current_user.isCritic", "line_number": 857, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 857, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 859, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 859, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 838, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 897, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 897, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 897, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 897, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 899, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 899, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 899, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 895, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 917, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 917, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 917, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 917, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 919, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 919, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 919, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 924, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 924, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 930, "usage_type": "attribute"}, {"api_name": 
"flask_login.current_user", "line_number": 930, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 915, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 939, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 939, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 937, "usage_type": "name"}]} +{"seq_id": "22442268", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 11 12:57:55 2016\n@author: slauniai\n\n\nsoil.biology -subpackage function\n\nContains functions and parameters related to biological activity of soil\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import interp1d \nfrom itertools import product\nfrom pyproj import Proj, transform\n\ndef soilRespiration(GisData, soildata, Ts, Wliq, imm_n, imm_p, decopara=None, soiltype='Yolo', limitpara=None, dt=1):\n \"\"\"\n computes soil respiration rate (CO2-flux) based on Pumpanen et al. (2003) Soil.Sci.Soc.Am\n Restricts respiration as in Skopp et al. (1990),Soil.Sci.Soc.Am\n IN:\n minmask - minerla soil mask (array where mineral soil is 1) \n Ts - soil temperature (degC)\n Wliq - soil vol. moisture content (m3 m-3)\n poros - soil porosity (m3 m-3)\n decopara - [R10, Q10]\n limitpara - [a,b,d,g] of Skopp -model\n OUT:\n rsoil - soil respiration rate\n fm - relative modifier (Skopp et al.)\n co2mi - array of co2 efflux from mineral soil, kg/ha/day\n \"\"\"\n \n nrows = int(((GisData['info'][0]).split()[1])) #shape c,r\n ncols = int(((GisData['info'][1]).split()[1]))\n #ixm = np.where(np.ravel(GisData['smc'])==1)\n ixm = np.equal(GisData['smc'],1)\n\n ix_1 = np.where(np.logical_and(np.equal(GisData['sfc'],1),np.equal(GisData['smc'],1))) # Lehto fertility\n ix_2 = np.where(np.logical_and(np.equal(GisData['sfc'],2),np.equal(GisData['smc'],1))) # Lehtomainen fertility\n ix_3 = np.where(np.logical_and(np.equal(GisData['sfc'],3),np.equal(GisData['smc'],1))) # Tuore fertility\n ix_4 = np.where(np.logical_and(np.equal(GisData['sfc'],4),np.equal(GisData['smc'],1))) # Kuivahko fertility\n ix_5 = np.where(np.logical_and(np.greater_equal(GisData['sfc'],5),np.equal(GisData['smc'],1))) # Kuiva fertility\n\n ix={'sfc_1': ix_1,'sfc_2': ix_2, 'sfc_3': ix_3, 'sfc_4': ix_4, 'sfc_5': ix_5} \n N = {'sfc_1': 2.4,'sfc_2': 2.2, 'sfc_3': 1.8, 'sfc_4': 1.6, 'sfc_5': 1.4} # Tamminen 1991 folia forestalia 777, page 18 Table 16: N cont in OM % dm\n P = {'sfc_1': 0.17,'sfc_2': 0.15, 'sfc_3': 0.13, 'sfc_4': 0.11, 'sfc_5': 0.1} \n\n Ts = 16. if Ts>16. else Ts\n Ts=-45. if Ts<-5. else Ts \n co2mi = np.empty((ncols,nrows)); co2mi[:]=np.nan\n fm = np.empty((ncols,nrows)); fm[:]=np.nan\n Nrel = np.empty((ncols,nrows)); Nrel[:]=np.nan\n Prel = Nrel.copy()\n poros= soildata['poros'] #GisData['params']['poros']\n\n sp={'Yolo':[3.83, 4.43, 1.25, 0.854], \n 'Valentine': [1.65,6.15,0.385,1.03]} #Skopp param [a,b,d,g], Table 1 \n if decopara is None:\n Q10=2.3; R10=4.0; # (-), umol m-2s-1\n else:\n decopara=np.array(decopara, ndmin=1)\n Q10=decopara[:,1]; R10=decopara[:,0]\n \n if limitpara is None:\n p=sp[soiltype]\n else:\n p=limitpara\n \n #unrestricted rate \n rs0=R10*np.power(Q10, (Ts-10.0)/10.0) #umol m-2 s-1\n rs0 = rs0 *1e-6 * 44.0 * 1e-3 * 1e4 * 86400 * 0.4 #, -> mol -> g (44 g/mol) -> kg -> ha ->day, to heterotrophic respiration \n if Ts<-10. 
\n\n#def understory_biomass(site, age, x=[]):\ndef understory_uptake(lat, lon, ts, gisdata, expected_yield, simtime):\n \"\"\"\n Created on Wed Jun 18 12:07:47 2014\n\n @author: slauniai\n\n understory_biomass(site, age, x=[]): \n Computes understory biomasses using models of Muukkonen & Makipaa, 2006 Bor. Env. Res.\\n\n INPUT:\n lat - latitude in YKJ or EUREF equivalent \n lon - longitude \n ts - annual temperature sum in degree days \n gisdata - includes np arrays of catchment stand and site properties\n expected_yield of stand during the simulation period m3 ha-1\n simtime - simulation time in years\n x - array of independent variables (optional, if not provided age-based model is used):\n x[0]=lat (degN, in decimal degrees)\n x[1]=lon (degE in decimal degrees) \n x[2]=elev (m)\n x[3]=temperature sum (degC)\n x[4]=site nutrient level (-) \n x[5]=stem vol. (m3 ha-1)\n x[6]=stem nr (ha-1)\n x[7]=basal area (m2 ha-1)\n x[8]=site drainage status, integer\n OUTPUT:\n y - dry biomasses (kg ha-1) of different groups\\n\n SOURCE:\n Muukkonen & Makipaa, 2006. Bor.Env.Res. 11, 355-369.\\n\n AUTHOR:\n Samuli Launiainen 18.06.2014, Modified for array operations by Ari Laurén 13.4.2020 \\n\n NOTE:\n Multi-regression models not yet tested!\n In model equations independent variables named differently to M&M (2006): here x[0] = z1, x[1]=z2, ... x[7]=z8 and x[8]=z10\\n\n \\n\n Site nutrient level x[4] at upland sites:\n 1: herb-rich forest \n 2: herb-rich heath f. \n 3: mesic heath f. \n 4: sub-xeric heath f.\n 5: xeric heath f. \n 6: barren heath f.\n 7: rock, cliff or sand f. \n Site nutrient level x[4] at mires:\\n\n 1: herb-rich hw-spruce swamps, pine mires, fens, \n 2: V.myrtillus / tall sedge spruce swamps, tall sedge pine fens, tall sedge fens,\n 3: Carex globularis / V.vitis-idaea swamps, Carex globularis pine swamps, low sedge (oligotrophic) fens,\n 4: Low sedge, dwarf-shrub & cottongrass pine bogs, ombo-oligotrophic bogs,\n 5: S.fuscum pine bogs, ombotrophic and S.fuscum low sedge bogs.\n Drainage status x[8] at mires (Paavilainen & Paivanen, 1995):\n 1: undrained\n 2: Recently drained, slight effect on understory veg., no effect on stand\n 3: Transforming drained mires, clear effect on understory veg and stand\n 4: Transforming drained mires, veget. resembles upland forest site type, tree-stand forest-like.\n \n \"\"\"\n \n \n #------------- classify and map pixels-------------------------------------------------------- \n ix_pine_upland = np.where( np.logical_and( np.logical_and(np.greater_equal( gisdata['p_vol'],gisdata['s_vol']), \n np.greater_equal( gisdata['p_vol'],gisdata['b_vol'])) ,\n np.equal(gisdata['smc'], 1)))\n ix_spruce_upland = np.where( np.logical_and( np.logical_and(np.greater_equal( gisdata['s_vol'],gisdata['p_vol']), \n np.greater_equal( gisdata['s_vol'],gisdata['b_vol'])) ,\n np.equal(gisdata['smc'], 1)))\n ix_broadleaved_upland = np.where( np.logical_and( np.logical_and(np.greater_equal( gisdata['b_vol'],gisdata['p_vol']), \n np.greater_equal( gisdata['b_vol'],gisdata['s_vol'])) ,\n np.equal(gisdata['smc'], 1)))\n ix_spruce_mire = np.where(np.equal(gisdata['smc'], 2))\n ix_pine_bog = np.where(np.equal(gisdata['smc'], 3))\n ix_open_peat = np.where(np.equal(gisdata['smc'], 4))\n \n #---------------------------------------\n #latitude = 0.0897*lat/10000. + 0.3462 #approximate conversion to decimal degrees within Finland, N\n #longitude = 0.1986*lon/10000. + 17.117 #approximate conversion to decimal degrees within Finland in degrees E\n inProj = Proj(init='epsg:3067')\n outProj = Proj(init='epsg:4326')\n longitude,latitude = transform(inProj,outProj,lon,lat)\n Nstems = 900. # x6 number of stems -ha, default 900\n drain_s =4 # x8 drainage status, default value 4\n\n #---------------------------------------\n
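# --- Illustrative sketch (editor's addition, not part of the original record) ---
# The Proj(init=...)/transform() pattern above is deprecated in pyproj >= 2.
# An equivalent conversion from Finnish ETRS-TM35FIN (EPSG:3067) to WGS84
# (EPSG:4326) with the current Transformer API would look like this:
from pyproj import Transformer

def to_wgs84(lon, lat):
    # always_xy=True keeps the (easting, northing) -> (lon, lat) axis order
    transformer = Transformer.from_crs("EPSG:3067", "EPSG:4326", always_xy=True)
    return transformer.transform(lon, lat)

# e.g. to_wgs84(394000.0, 6900000.0) -> approximately (25.0 degE, 62.2 degN)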
 def gv_biomass_and_nutrients(gisdata, ix_pine_upland, ix_spruce_upland, ix_broadleaved_upland, ix_spruce_mire, ix_pine_bog,\n ix_open_peat, latitude, longitude, dem, ts, sfc, vol, Nstems, ba, drain_s, age): \n #--------------- nutrient contents in vegetation-----------------------\n \"\"\"\n Computed:\n - total biomass and bottom layer; field layer is gained as a difference of tot and bottom layer (more coherent results)\n - N and P storage in the each pixel\n - annual use of N and P due to litterfall\n Muukkonen Mäkipää 2005 upland sites: field layer contains dwarf shrubs and (herbs + grasses), see Fig 1\n share dwarf shrubs herbs \n - Pine 91% 9%\n - Spruce 71% 29%\n - broad l 38% 62%\n Peatland sites (assumption):\n share dwarf shrubs herbs\n - Pine bogs 90% 10%\n - Spruce mires 50% 50%\n Palviainen et al. 2005 Ecol Res (2005) 20: 652–660, Table 2\n Nutrient concentrations for\n N P\n - Dwarf shrubs 1.2% 1.0mg/g\n - herbs & grasses 1.8% 2.0mg/g\n - upland mosses 1.25% 1.4 mg/g\n Nutrient concentrations for sphagna (FIND):\n N P for N :(Bragazza et al Global Change Biology (2005) 11, 106–114, doi: 10.1111/j.1365-2486.2004.00886.x)\n - sphagnum 0.6% 1.4 mg/g (Palviainen et al 2005) \n Annual litterfall proportions from above-ground biomass (Mälkönen 1974, Tamm 1953):\n - Dwarf shrubs 0.2\n - herbs & grasses 1\n - mosses 0.3\n Tamm, C.O. 1953. Growth, yield and nutrition in carpets of a forest moss (Hylocomium splendens). Meddelanden från Statens Skogsforsknings Institute 43 (1): 1-140.\n We assume retranslocation of N and P away from senescing tissues before litterfall:\n N P\n - Dwarf shrubs 0.5 0.5\n - Herbs & grasses 0.5 0.5\n - mosses 0.0 0.0\n \n Turnover of total biomass including the belowground biomass is assumed to be 1.2 x above-ground biomass turnover\n \n \"\"\"\n\n fl_share = {'description': 'share of dwarf shrubs (ds) and herbs & grasses (h) from field layer biomass, kg kg-1',\n 'pine_upland':{'ds': 0.87, 'h': 0.13}, 'spruce_upland':{'ds': 0.71, 'h': 0.29}, \n 'broadleaved_upland':{'ds': 0.38, 'h': 0.62}, 'spruce_mire':{'ds': 0.90, 'h': 0.10}, \n 'pine_bog':{'ds': 0.50, 'h': 0.50}}\n nut_con ={'description': 'nutrient concentration of dwarf shrubs (ds), herbs & grasses (h), upland mosses (um), and sphagna (s), unit mg/g',\n 'ds':{'N':12.0, 'P':1.0}, 'h':{'N':18.0, 'P':2.0}, 'um':{'N':12.5, 'P':1.4}, 's':{'N':6.0, 'P':1.4}}\n lit_share = {'description': 'share of living biomass that is lost as litter annually for dwarf shrubs (ds), herbs & grasses (h), upland mosses (um), and sphagna (s), unit: kg kg-1',\n 'ds': 0.2, 'h': 0.5, 'um': 0.3, 's': 0.3}\n retrans ={'description': 'share of nutrients retranslocated before litterfall for dwarf shrubs (ds), herbs & grasses (h), upland mosses (um), and sphagna (s), unit: kg kg-1',\n 'ds': {'N':0.5, 'P':0.5},'h': {'N':0.5, 'P':0.5}, \n 'um': {'N':0.0, 'P':0.0},'s': {'N':0.0, 'P':0.0}}\n fl_to_total_turnover = 1.2 # converts the turnover of above-ground biomass to total including root turnover\n fl_above_to_total = 1.7 # converts aboveground biomass to total biomass \n \n #--------- create output arrays -----------------------------\n gv_tot = np.zeros(np.shape(gisdata['cmask'])) # Ground vegetation mass kg ha-1\n gv_field = np.zeros(np.shape(gisdata['cmask'])) # Field layer vegetation mass\n gv_bot = np.zeros(np.shape(gisdata['cmask'])) # Bottom layer vegetation mass\n ds_litterfall = np.zeros(np.shape(gisdata['cmask'])) # dwarf shrub litterfall kg ha-1 yr-1\n h_litterfall = np.zeros(np.shape(gisdata['cmask'])) # herbs and grasses litterfall kg ha-1 yr-1\n um_litterfall = np.zeros(np.shape(gisdata['cmask'])) # upland mosses litterfall kg ha-1 yr-1\n s_litterfall = np.zeros(np.shape(gisdata['cmask'])) # sphagnum mosses litterfall kg ha-1 yr-1\n nup_litter = np.zeros(np.shape(gisdata['cmask'])) # N uptake due to litterfall kg ha-1 yr-1\n pup_litter = np.zeros(np.shape(gisdata['cmask'])) # P uptake due to litterfall kg ha-1 yr-1\n n_gv = np.zeros(np.shape(gisdata['cmask'])) # N in ground vegetation kg ha-1\n p_gv = np.zeros(np.shape(gisdata['cmask'])) # P in ground vegetation kg ha-1\n zeromask = np.zeros(np.shape(gisdata['cmask']))\n \"\"\"------ Ground vegetation models from Muukkonen & Mäkipää 2006 BER vol 11, Tables 6,7,8\"\"\" \n
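# --- Illustrative sketch (editor's addition, not part of the original record) ---
# Nutrient bookkeeping for one pixel, using the fl_share/nut_con/lit_share/retrans
# tables above. The biomass values (kg ha-1) below are made up for illustration.
def pixel_n_balance(gv_field=500.0, gv_bot=800.0):
    ds, h = 0.87, 0.13                   # fl_share['pine_upland']
    n_ds, n_h, n_um = 12.0, 18.0, 12.5   # nut_con, mg/g -> *1e-3 gives kg/kg
    # N storage: field layer scaled from above-ground to total biomass by 1.7
    n_store = (gv_field*ds*n_ds + gv_field*h*n_h)*1e-3*1.7 + gv_bot*n_um*1e-3
    # annual litterfall (lit_share, scaled by 1.2 for root turnover) and the N
    # uptake needed to replace it, after 50% retranslocation from shrubs/herbs
    ds_lit = ds*gv_field*0.2*1.2
    h_lit = h*gv_field*0.5*1.2
    um_lit = gv_bot*0.3
    n_uptake = ds_lit*n_ds*1e-3*0.5 + h_lit*n_h*1e-3*0.5 + um_lit*n_um*1e-3
    return n_store, n_uptake             # kg N ha-1 and kg N ha-1 yr-1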
 #***************** Pine upland ***************************************\n ix = ix_pine_upland\n # dependent variable is sqrt(biomass -0.5), final output in kg ha-1\n gv_tot[ix] = np.square(22.523 + 0.084*sfc[ix]*latitude + 0.01*longitude*age[ix] -0.031*sfc[ix]*age[ix] \\\n -7e-4*np.square(age[ix]) -3e-4*np.square(vol[ix]) +6e-4*vol[ix]* age[ix]) -0.5 +231.56 #Total\n gv_field[ix] = np.square(13.865 +0.013*latitude*longitude -2.969*sfc[ix] +3e-5*ts*age[ix]) - 0.5 + 96.72 #Field layer total. >last term correction factor\n gv_bot[ix] = np.square(8.623 +0.09*sfc[ix]*latitude +0.004*longitude*age[ix] +3e-5*dem[ix]*ts \\\n -3e-4*np.square(vol[ix]) -5e-4*np.square(age[ix]) + 8e-4*vol[ix]*age[ix] )-0.5 + 355.13 #Bottom layer total\n # removing inconsistent values\n gv_field[ix] = np.minimum(gv_tot[ix], gv_field[ix])\n gv_bot[ix] = np.minimum(gv_tot[ix], gv_bot[ix]) \n gv_field[ix] = np.maximum(gv_field[ix], gv_tot[ix] - gv_bot[ix])\n \n #------------------------------------------------------------------\n #annual litterfall rates and nutrient uptake due to litterfall\n ds_litterfall[ix] = fl_share['pine_upland']['ds']*gv_field[ix]*lit_share['ds']*fl_to_total_turnover\n h_litterfall[ix] = fl_share['pine_upland']['h']*gv_field[ix]*lit_share['h']*fl_to_total_turnover\n um_litterfall[ix] = gv_bot[ix]*lit_share['um']\n n_gv[ix] = gv_field[ix] * fl_share['pine_upland']['ds']*nut_con['ds']['N']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['pine_upland']['h']*nut_con['h']['N']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['um']['N']*1e-3\n p_gv[ix] = gv_field[ix] * fl_share['pine_upland']['ds']*nut_con['ds']['P']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['pine_upland']['h']*nut_con['h']['P']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['um']['P']*1e-3 \n nup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['N']*1e-3 * (1.0 -retrans['ds']['N']) \\\n +h_litterfall[ix] * nut_con['h']['N']*1e-3 * (1.0 -retrans['h']['N']) \\\n +um_litterfall[ix] * nut_con['um']['N']*1e-3 * (1.0 -retrans['um']['N'])\n pup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['P']*1e-3 * (1.0 -retrans['ds']['P']) \\\n +h_litterfall[ix] * nut_con['h']['P']*1e-3 * (1.0 -retrans['h']['P']) \\\n +um_litterfall[ix] * nut_con['um']['P']*1e-3 * (1.0 -retrans['um']['P'])\n\n
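# --- Illustrative note (editor's addition, not part of the original record) ---
# The regressions above were fitted on a square-root scale, so each prediction
# is squared, 0.5 is subtracted, and a table-specific bias-correction constant
# is added back (e.g. +231.56 for pine upland total). Schematically:
#
#   pred_sqrt = 22.523 + 0.084*sfc*lat + ...              # fitted on sqrt scale
#   biomass   = pred_sqrt**2 - 0.5 + correction_factor    # kg ha-1
#
# The added constant compensates for the back-transformation bias of the
# sqrt-scale regression (Muukkonen & Makipaa 2006, Tables 6-8).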
 #***************** Spruce upland *************************************** \n ix = ix_spruce_upland\n gv_tot[ix] = np.square(22.522 +0.026*sfc[ix]*age[ix] +0.11*sfc[ix]*latitude -0.003*sfc[ix]*ts) -0.5 + 206.67 #Total\n gv_field[ix] = np.square(-42.593 + 0.981*latitude -0.008*np.square(ba[ix]) +0.002*ba[ix]*age[ix])-0.5 + 67.15 #Field layer total\n # removing inconsistent values\n gv_field[ix] = np.minimum(gv_tot[ix], gv_field[ix]) \n #gv_bot was not in the original computation, but mosses make up ~75% of gv biomass so it is worth including\n gv_bot[ix] = np.square(9.672+0.029*sfc[ix]*age[ix]+0.078*sfc[ix]*latitude+0.186*ba[ix]) -0.5 +264.82 #mosses (bottom layer total not available)\n # removing inconsistent values; may be unnecessary now that the equations have been checked\n gv_bot[ix] = np.minimum(gv_tot[ix], gv_bot[ix]) # not in the original computation\n\n \n \n #annual litterfall rates\n ds_litterfall[ix] = fl_share['spruce_upland']['ds']*gv_field[ix]*lit_share['ds']*fl_to_total_turnover\n h_litterfall[ix] = fl_share['spruce_upland']['h']*gv_field[ix]*lit_share['h']*fl_to_total_turnover\n um_litterfall[ix] = gv_bot[ix]*lit_share['um']\n n_gv[ix] = gv_field[ix] * fl_share['spruce_upland']['ds']*nut_con['ds']['N']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['spruce_upland']['h']*nut_con['h']['N']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['um']['N']*1e-3\n p_gv[ix] = gv_field[ix] * fl_share['spruce_upland']['ds']*nut_con['ds']['P']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['spruce_upland']['h']*nut_con['h']['P']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['um']['P']*1e-3\n nup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['N']*1e-3 * (1.0 -retrans['ds']['N']) \\\n +h_litterfall[ix] * nut_con['h']['N']*1e-3 * (1.0 -retrans['h']['N']) \\\n +um_litterfall[ix] * nut_con['um']['N']*1e-3 * (1.0 -retrans['um']['N'])\n pup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['P']*1e-3 * (1.0 -retrans['ds']['P']) \\\n +h_litterfall[ix] * nut_con['h']['P']*1e-3 * (1.0 -retrans['h']['P']) \\\n +um_litterfall[ix] * nut_con['um']['P']*1e-3 * (1.0 -retrans['um']['P'])\n \n
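# --- Illustrative sketch (editor's addition, not part of the original record) ---
# The "removing inconsistent values" lines used after most groups enforce
# gv_field <= gv_tot, gv_bot <= gv_tot and (where applied) gv_field + gv_bot >=
# gv_tot, since the three regressions are independent and need not be mutually
# consistent. As a standalone helper:
import numpy as np

def clamp_layers(gv_tot, gv_field, gv_bot):
    gv_field = np.minimum(gv_tot, gv_field)           # field layer cannot exceed total
    gv_bot = np.minimum(gv_tot, gv_bot)               # bottom layer cannot exceed total
    gv_field = np.maximum(gv_field, gv_tot - gv_bot)  # layers together at least cover total
    return gv_field, gv_bot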
 #***************** Broadleaved upland *************************************** \n ix = ix_broadleaved_upland\n gv_tot[ix] = np.square(19.8 +0.691*sfc[ix]*latitude -38.578*sfc[ix])-0.5 + 156.51 #Total\n gv_field[ix] = np.square(-95.393 +0.094*latitude*longitude -1e-6*Nstems*ts -0.106*longitude**2 +5e-4*latitude*ts)-0.5 + 55.40 #Field layer total\n #below: gv_bot was not in the original computation\n gv_bot[ix] = np.square(20.931+0.096*sfc[ix]*latitude-0.0006*longitude*ts)-0.5 + 236.6 #mosses (bottom layer total not available)\n # removing inconsistent values, as for spruce upland\n gv_field[ix] = np.minimum(gv_tot[ix], gv_field[ix]) \n gv_bot[ix] = np.minimum(gv_tot[ix], gv_bot[ix]) # not in the original computation\n\n \n #annual litterfall rates\n ds_litterfall[ix] = fl_share['broadleaved_upland']['ds']*gv_field[ix]*lit_share['ds']*fl_to_total_turnover\n h_litterfall[ix] = fl_share['broadleaved_upland']['h']*gv_field[ix]*lit_share['h']*fl_to_total_turnover\n um_litterfall[ix] = gv_bot[ix]*lit_share['um']\n n_gv[ix] = gv_field[ix] * fl_share['broadleaved_upland']['ds']*nut_con['ds']['N']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['broadleaved_upland']['h']*nut_con['h']['N']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['um']['N']*1e-3\n p_gv[ix] = gv_field[ix] * fl_share['broadleaved_upland']['ds']*nut_con['ds']['P']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['broadleaved_upland']['h']*nut_con['h']['P']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['um']['P']*1e-3\n nup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['N']*1e-3 * (1.0 -retrans['ds']['N']) \\\n +h_litterfall[ix] * nut_con['h']['N']*1e-3 * (1.0 -retrans['h']['N']) \\\n +um_litterfall[ix] * nut_con['um']['N']*1e-3 * (1.0 -retrans['um']['N'])\n pup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['P']*1e-3 * (1.0 -retrans['ds']['P']) \\\n +h_litterfall[ix] * nut_con['h']['P']*1e-3 * (1.0 -retrans['h']['P']) \\\n +um_litterfall[ix] * nut_con['um']['P']*1e-3 * (1.0 -retrans['um']['P'])\n\n \n #***************** Spruce mire ***************************************\n ix = ix_spruce_mire \n gv_tot[ix] = np.square(35.52 +0.001*longitude*dem[ix] -1.1*drain_s**2 -2e-5*vol[ix]*Nstems \\\n +4e-5*Nstems*age[ix] +0.139*longitude*drain_s) -0.5 + 116.54 #Total\n gv_bot[ix] = np.square(-3.182 + 0.022*latitude*longitude +2e-4*dem[ix]*age[ix] \\\n -0.077*sfc[ix]*longitude -0.003*longitude*vol[ix] + 2e-4*np.square(vol[ix]))-0.5 + 98.10 #Bottom layer total\n gv_field[ix] = np.square(23.24 -1.163*drain_s**2 +1.515*sfc[ix]*drain_s -2e-5*vol[ix]*Nstems\\\n +8e-5*ts*age[ix] +1e-5*Nstems*dem[ix])-0.5 + 162.58 #Field layer total\n # removing inconsistent values\n gv_field[ix] = np.minimum(gv_tot[ix], gv_field[ix])\n gv_bot[ix] = np.minimum(gv_tot[ix], gv_bot[ix]) \n gv_field[ix] = np.maximum(gv_field[ix], gv_tot[ix] - gv_bot[ix])\n\n #annual litterfall rates\n ds_litterfall[ix] = fl_share['spruce_mire']['ds']*(gv_tot[ix]-gv_bot[ix])*lit_share['ds']*fl_to_total_turnover\n h_litterfall[ix] = fl_share['spruce_mire']['h']*(gv_tot[ix]-gv_bot[ix])*lit_share['h']*fl_to_total_turnover\n s_litterfall[ix] = gv_bot[ix]*lit_share['s']\n n_gv[ix] = gv_field[ix] * fl_share['spruce_mire']['ds']*nut_con['ds']['N']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['spruce_mire']['h']*nut_con['h']['N']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['s']['N']*1e-3\n p_gv[ix] = gv_field[ix] * fl_share['spruce_mire']['ds']*nut_con['ds']['P']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['spruce_mire']['h']*nut_con['h']['P']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['s']['P']*1e-3\n nup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['N']*1e-3 * (1.0 -retrans['ds']['N']) \\\n +h_litterfall[ix] * nut_con['h']['N']*1e-3 * (1.0 -retrans['h']['N']) \\\n +s_litterfall[ix] * nut_con['s']['N']*1e-3 * (1.0 -retrans['s']['N'])\n pup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['P']*1e-3 * (1.0 -retrans['ds']['P']) \\\n +h_litterfall[ix] * nut_con['h']['P']*1e-3 * (1.0 -retrans['h']['P']) \\\n +s_litterfall[ix] * nut_con['s']['P']*1e-3 * (1.0 -retrans['s']['P'])\n\n \n
 #***************** Pine bogs ***************************************\n #ix = ix_pine_bog \n #gv_tot[ix] = np.square(50.098 +0.005*longitude*dem[ix] -1e-5*vol[ix]*Nstems +0.026*sfc[ix]*age[ix] \\\n # -1e-3*dem[ix]*ts -0.014*vol[ix]*drain_s) - 0.5 + 167.40 #Total \n #gv_bot[ix] = np.square(31.809 +0.008*longitude*dem[ix] -3e-4*Nstems*ba[ix] \\\n # +6e-5*Nstems*age[ix] -0.188*dem[ix]) -0.5 + 222.22 #Bottom layer total\n #gv_field[ix] = np.square(48.12 -1e-5*ts**2 +0.013*sfc[ix]*age[ix] -0.04*vol[ix]*age[ix] \\\n # +0.026*sfc[ix]*vol[ix]) - 0.5 +133.26 #Field layer total\n #***************** Pine bogs ***************************************\n ix = ix_pine_bog \n #Nstems=1555. #mean from the Muukkonen & Makipaa article\t\t\n #gv_tot[ix] = np.square(50.098 +0.005*longitude*dem[ix] -1e-5*vol[ix]*Nstems +0.026*sfc[ix]*age[ix] \\\n # -1e-3*dem[ix]*ts -0.014*vol[ix]*drain_s) - 0.5 + 167.40 #Total \n gv_tot[ix] = np.square(50.098 +0.005*longitude*dem[ix] -1e-5*vol[ix]*Nstems +0.026*sfc[ix]*age[ix] \\\n -1e-4*dem[ix]*ts -0.014*vol[ix]*drain_s) - 0.5 + 167.40 #Total \n gv_bot[ix] = np.square(31.809 +0.008*longitude*dem[ix] -3e-4*Nstems*ba[ix] \\\n +6e-5*Nstems*age[ix] -0.188*dem[ix]) -0.5 + 222.22 #Bottom layer total\n #gv_field[ix] = np.square(48.12 -1e-5*ts**2 +0.013*sfc[ix]*age[ix] -0.04*vol[ix]*age[ix] \\\n # +0.026*sfc[ix]*vol[ix]) - 0.5 +133.26 #Field layer total\n gv_field[ix] = np.square(48.12 -1e-5*ts**2 +0.013*sfc[ix]*age[ix] -0.04*vol[ix]*drain_s \\\n +0.026*sfc[ix]*vol[ix]) - 0.5 +133.26 #Field layer total\n \n\n\n\n # removing inconsistent values\n gv_field[ix] = np.minimum(gv_tot[ix], gv_field[ix])\n gv_bot[ix] = np.minimum(gv_tot[ix], gv_bot[ix]) \n gv_field[ix] = np.maximum(gv_field[ix], gv_tot[ix] - gv_bot[ix])\n \n #annual litterfall rates\n ds_litterfall[ix] = fl_share['pine_bog']['ds']*(gv_tot[ix]-gv_bot[ix])*lit_share['ds']*fl_to_total_turnover\n h_litterfall[ix] = fl_share['pine_bog']['h']*(gv_tot[ix]-gv_bot[ix])*lit_share['h']*fl_to_total_turnover\n s_litterfall[ix] = gv_bot[ix]*lit_share['s']\n n_gv[ix] = gv_field[ix] * fl_share['pine_bog']['ds']*nut_con['ds']['N']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['pine_bog']['h']*nut_con['h']['N']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['s']['N']*1e-3\n p_gv[ix] = gv_field[ix] * fl_share['pine_bog']['ds']*nut_con['ds']['P']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['pine_bog']['h']*nut_con['h']['P']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['s']['P']*1e-3\n nup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['N']*1e-3 * (1.0 -retrans['ds']['N']) \\\n +h_litterfall[ix] * nut_con['h']['N']*1e-3 * (1.0 -retrans['h']['N']) \\\n +s_litterfall[ix] * nut_con['s']['N']*1e-3 * (1.0 
-retrans['s']['N'])\n pup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['P']*1e-3 * (1.0 -retrans['ds']['P']) \\\n +h_litterfall[ix] * nut_con['h']['P']*1e-3 * (1.0 -retrans['h']['P']) \\\n +s_litterfall[ix] * nut_con['s']['P']*1e-3 * (1.0 -retrans['s']['P'])\n\n #**************** Open peatlands**********************************\n # Not in Mäkipää & Muukkonen, apply Pine bogs\n ix = ix_open_peat \n age[ix] = 10.\n vol[ix] = 5.\n ba[ix] = 1.\n Nstems=100.\n\n gv_bot[ix] = np.square(31.809 +0.008*longitude*dem[ix] -3e-4*Nstems*ba[ix] \\\n +6e-5*Nstems*age[ix] -0.188*dem[ix]) -0.5 + 222.22 #Bottom layer total\n gv_field[ix] = np.square(48.12 -1e-5*ts**2 +0.013*sfc[ix]*age[ix] -0.04*vol[ix]*age[ix] \\\n +0.026*sfc[ix]*vol[ix]) - 0.5 +133.26 #Field layer total\n gv_tot[ix] = np.square(50.098 +0.005*longitude*dem[ix] -1e-5*vol[ix]*Nstems +0.026*sfc[ix]*age[ix] \\\n -1e-3*dem[ix]*ts -0.014*vol[ix]*drain_s) - 0.5 + 167.40 #Total \n \n # removing inconsistent values\n gv_field[ix] = np.minimum(gv_tot[ix], gv_field[ix])\n gv_bot[ix] = np.minimum(gv_tot[ix], gv_bot[ix]) \n gv_field[ix] = np.maximum(gv_field[ix], gv_tot[ix] - gv_bot[ix])\n \n #annual litterfall rates\n ds_litterfall[ix] = fl_share['pine_bog']['ds']*(gv_tot[ix]-gv_bot[ix])*lit_share['ds']*fl_to_total_turnover\n h_litterfall[ix] = fl_share['pine_bog']['h']*(gv_tot[ix]-gv_bot[ix])*lit_share['h']*fl_to_total_turnover\n s_litterfall[ix] = gv_bot[ix]*lit_share['s']\n n_gv[ix] = gv_field[ix] * fl_share['pine_bog']['ds']*nut_con['ds']['N']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['pine_bog']['h']*nut_con['h']['N']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['s']['N']*1e-3\n p_gv[ix] = gv_field[ix] * fl_share['pine_bog']['ds']*nut_con['ds']['P']*1e-3*fl_above_to_total \\\n +gv_field[ix] * fl_share['pine_bog']['h']*nut_con['h']['P']*1e-3*fl_above_to_total \\\n +gv_bot[ix] *nut_con['s']['P']*1e-3\n nup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['N']*1e-3 * (1.0 -retrans['ds']['N']) \\\n +h_litterfall[ix] * nut_con['h']['N']*1e-3 * (1.0 -retrans['h']['N']) \\\n +s_litterfall[ix] * nut_con['s']['N']*1e-3 * (1.0 -retrans['s']['N'])\n pup_litter[ix] = ds_litterfall[ix] * nut_con['ds']['P']*1e-3 * (1.0 -retrans['ds']['P']) \\\n +h_litterfall[ix] * nut_con['h']['P']*1e-3 * (1.0 -retrans['h']['P']) \\\n +s_litterfall[ix] * nut_con['s']['P']*1e-3 * (1.0 -retrans['s']['P'])\n \n \n \n #------------Change clear-cut areas: reduce to 1/3 of modelled ---------------------------------------------------\n to_cc = 0.33\n #ix_cc = np.where(np.logical_and(gisdata['age']<5.0, gisdata['smc']!=4)) #small stands excluding open peatlands\n ix_cc = np.where(gisdata['age']<5.0)\n n_gv[ix_cc] = n_gv[ix_cc] * to_cc \n p_gv[ix_cc] = p_gv[ix_cc] * to_cc\n nup_litter[ix_cc] = nup_litter[ix_cc] * to_cc\n pup_litter[ix_cc] = pup_litter[ix_cc] * to_cc \n gv_bot[ix_cc] = gv_bot[ix_cc] * to_cc\n\n if np.nanmin(n_gv) < 0.0:\n print ('NEGATIVE UPTAKE')\n print (np.nanmin(n_gv))\n import sys; sys.exit()\n return n_gv, p_gv, nup_litter, pup_litter, gv_bot\n\n dem = gisdata['dem'].copy() # x2 surface elevation m asl\n sfc = np.where(gisdata['sfc']>5, 5, gisdata['sfc'])\n vol = gisdata['vol'].copy() # x5 stand volume m3 ha-1\n ba = gisdata['ba'].copy() # x7 basal area m2 ha-1\n age = gisdata['age'].copy() # x9 stand age in yrs\n\n # initial N and P mass, kg ha-1 \n n_gv, p_gv, nup_litter, pup_litter, gv_tot = gv_biomass_and_nutrients(gisdata, ix_pine_upland, ix_spruce_upland, ix_broadleaved_upland, ix_spruce_mire, ix_pine_bog,\n ix_open_peat, latitude, longitude, 
dem, ts, sfc, vol, Nstems, ba, drain_s, age)\n \n # ground vegetation mass at the end of simulation, kg ha-1 \n vol = vol + expected_yield\n age = age + simtime\n n_gv_end, p_gv_end, nup_litter_end, pup_litter_end, gv_tot = gv_biomass_and_nutrients(gisdata, ix_pine_upland, ix_spruce_upland, ix_broadleaved_upland, ix_spruce_mire, ix_pine_bog,\n ix_open_peat, latitude, longitude, dem, ts, sfc, vol, Nstems, ba, drain_s, age)\n \n # nutrient uptake due to net change in gv biomass; only positive values accepted, negative changes do not imply nutrient uptake\n nup_net = np.where(n_gv_end - n_gv > 0.0, n_gv_end - n_gv, 0.0)\n pup_net = np.where(p_gv_end - p_gv > 0.0, p_gv_end - p_gv, 0.0)\n \n nup_litter_mean = np.mean([nup_litter, nup_litter_end], axis = 0)\n pup_litter_mean = np.mean([pup_litter, pup_litter_end], axis = 0)\n \n nup = nup_net + nup_litter_mean*simtime # total N uptake kg ha-1 per simulation time (in yrs)\n pup = pup_net + pup_litter_mean*simtime # total P uptake kg ha-1 per simulation time (in yrs)\n \n popt = False\n if popt:\n import matplotlib.pylab as plt\n fig = plt.figure(num='nutvege')\n plt.subplot(331); plt.imshow(n_gv); plt.colorbar()\n plt.subplot(332); plt.imshow(nup_net); plt.colorbar()\n plt.subplot(333); plt.imshow(nup_litter_mean); plt.colorbar()\n plt.subplot(334); plt.imshow(p_gv); plt.colorbar()\n plt.subplot(335); plt.imshow(pup_net); plt.colorbar()\n plt.subplot(336); plt.imshow(pup_litter); plt.colorbar()\n plt.subplot(337); plt.imshow(gv_tot); plt.colorbar()\n plt.subplot(338); plt.imshow(nup); plt.colorbar()\n plt.subplot(339); plt.imshow(pup); plt.colorbar()\n \n \n return nup, pup\n\n\ndef TAmr(T):\n #Ojanen et al. 2010 Forest Ecology and Management 260:411-421 \n #compute mean growing season (May-October) air temperature\n import datetime\n import pandas as pd\n dfOut = pd.DataFrame(T) \n dfOut['doy']= dfOut.index.dayofyear\n summer = dfOut.groupby(dfOut['doy']).mean()\n start = datetime.datetime(2010,5,1).timetuple().tm_yday\n end = datetime.datetime(2010,10,31).timetuple().tm_yday\n TAmr= summer[start:end].mean()\n return TAmr[0] \n \n
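# --- Illustrative sketch (editor's addition, not part of the original record) ---
# TAmr() above builds a day-of-year climatology before averaging. If T is a daily
# pd.Series with a DatetimeIndex, the May-October mean can also be computed
# directly, which may be easier to follow:
import pandas as pd

def growing_season_mean(T: pd.Series) -> float:
    mask = T.index.month.isin(range(5, 11))   # May (5) ... October (10)
    return float(T[mask].mean())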
def peatRespiration(GisData, T, imm_n, imm_p, TASmr=10.4, gwl=None, dt=1):\n \"\"\"\n Returns potential soil oxygen consumption through soil respiration (CO2) flux through the soil surface according to Ojanen et al 2010 ForEco, eq1\n Boundary conditions: max soil temperature 16 deg C. No minimum value. \\n\n Input:\n peatmask, this model for peat only \\n\n vol volume of the growing stock m3ha-1 \\n \n Tsoil soil temperature in 5 cm depth, deg C \\n\n TAmr mean air temperature May-October, deg C \\n \n gwl ground water level, m, negative down \\n\n bd bulk density of topsoil \\n\n Output:\n Heterotrophic respiration, CO2 efflux caused by decomposition, as kg ha-1 day-1 (converted from g m-2 h-1) \\n\n \"\"\"\n import numpy as np\n #Ojanen et al. 2010 Forest Ecology and Management 260:411-421 \n \n nrows = int(((GisData['info'][0]).split()[1])) #shape c,r\n ncols = int(((GisData['info'][1]).split()[1]))\n peatm = GisData['peatm']\n\n\n co2 = np.empty((ncols,nrows)); co2[:]=np.nan\n Rref = np.empty((ncols,nrows)); Rref[:]=np.nan\n Nrel = np.empty((ncols,nrows)); Nrel[:]=np.nan\n Prel = Nrel.copy()\n B = Nrel.copy()\n \n #ixp = np.where(peatm==True) \n #ix_2 = np.where(np.logical_and(np.less_equal(GisData['sfc'],2),np.equal(peatm,True))) # Rhtkg fertility\n #ix_3 = np.where(np.logical_and(np.equal(GisData['sfc'],3),np.equal(peatm,True))) # Mtkg fertility\n #ix_4 = np.where(np.logical_and(np.equal(GisData['sfc'],4),np.equal(peatm,True))) # Ptkg fertility\n #ix_5 = np.where(np.logical_and(np.greater_equal(GisData['sfc'],5),np.equal(peatm,True))) # Vtkg fertility\n #ix={'sfc_2': ix_2, 'sfc_3': ix_3, 'sfc_4': ix_4, 'sfc_5': ix_5} \n \n \n ixp = np.greater_equal(GisData['smc'],2)\n #print(\"smc 6 20 cr:\", GisData['smc'][20,6])\n #print(ixp)\n ix_2 = np.where(np.logical_and(np.less_equal(GisData['sfc'],2),np.greater_equal(GisData['smc'],2))) # Rhtkg fertility\n ix_3 = np.where(np.logical_and(np.equal(GisData['sfc'],3),np.greater_equal(GisData['smc'],2))) # Mtkg fertility\n ix_4 = np.where(np.logical_and(np.equal(GisData['sfc'],4),np.greater_equal(GisData['smc'],2))) # Ptkg fertility\n ix_5 = np.where(np.logical_and(np.greater_equal(GisData['sfc'],5),np.greater_equal(GisData['smc'],2))) # Vtkg fertility\n ix={'sfc_2': ix_2, 'sfc_3': ix_3, 'sfc_4': ix_4, 'sfc_5': ix_5} \n \n \n \n V = GisData['vol']\n #V=100.\n if T >16. : T=16.\n #if T<-5.: T=-45. \n T5ref=10.0; T50=-46.02 #; wtm =80.0 # wtm mean water table in cm May-Oct, maximum 80 in the Ojanen data, max used because later cut with the current wt\n pt = 99.0 # peat layer thickness, cm \n bd = {'sfc_2': 0.14, 'sfc_3': 0.11, 'sfc_4': 0.10, 'sfc_5': 0.08} # Mese study: bulk densities in different fertility classes \n N = {'sfc_2': 1.9, 'sfc_3': 1.6, 'sfc_4': 1.4, 'sfc_5': 1.2} # Mese study: N cont in OM % dm\n P = {'sfc_2': 0.1, 'sfc_3': 0.08, 'sfc_4': 0.06, 'sfc_5': 0.05} # Mese study: P cont in OM % dm\n\n for k in ix.keys():\n #Rref[ix[k]] = 0.0695 + V[ix[k]]*3.7e-4 + bd[k]*1000.0 * 5.4e-4 +wtm * 1.2e-3 # parameters: Table 3 RHet\n #Rref[ix[k]] = 0.0695 + V*3.7e-4 + bd[k]*1000.0 * 5.4e-4 +wtm * 1.2e-3 # parameters: Table 3 RHet\n Rref[ix[k]] = 0.0695 + V[ix[k]]*3.7e-4 + bd[k]*1000.0 * 5.4e-4 +gwl[ix[k]]*(-100.0) * 1.2e-3 # parameters: Table 3 RHet\n \n #********** Different bd for each fertility class\n for k in ix.keys():\n B[ix[k]]= 156.032 + 16.5*TASmr - 0.196*pt + 0.354*bd[k]*1000. # Ojanen 2010 et al. Table 4 \n \n #Computes momentary Heterotrophic CO2 flux as a function of soil temperature and peat bulk density \n for k in ix.keys():\n co2[ix[k]] = Rref[ix[k]]*np.exp(B[ix[k]]*(1.0/(T5ref-T50)-1.0/(T-T50))) #g m-2 h-1 \n co2[ix[k]] = co2[ix[k]] *10000. /1000. * 24. * dt # Conversion to kg ha day-1 \n\n #N release\n C_in_OM = 0.55 # C content in OM kg kg-1\n CO2_to_C = 12./44.\n Nmicrob = imm_n # microbial immobilisation \n Pmicrob = imm_p # microbial immobilisation \n \n #Restrict CO2 evolution by gwl -> no co2 efflux below the water table \n #May 21 2020 removed\n #if gwl is not None: co2[ixp]= z_distrib_decomposition(gwl[ixp])*co2[ixp] \n\n for k in ix.keys():\n Nrel[ix[k]] = co2[ix[k]] * CO2_to_C / C_in_OM * N[k] / 100. * (1.-Nmicrob) # N release kg ha-1 day-1\n Prel[ix[k]] = co2[ix[k]] * CO2_to_C / C_in_OM * P[k] / 100. * (1.-Pmicrob) # P release kg ha-1 day-1\n \n return Nrel, Prel, ixp\n
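# --- Illustrative sketch (editor's addition, not part of the original record) ---
# The core of peatRespiration() is the Lloyd & Taylor type temperature response
# of Ojanen et al. (2010), evaluated here for a single pixel with made-up inputs:
import numpy as np

def ojanen_co2(T5=10.0, vol=150.0, bd=0.11, gwl=-0.4, TASmr=10.4, pt=99.0):
    T5ref, T50 = 10.0, -46.02
    # reference respiration at 10 degC, g CO2 m-2 h-1 (Table 3, RHet)
    Rref = 0.0695 + vol*3.7e-4 + bd*1000.0*5.4e-4 + (-100.0*gwl)*1.2e-3
    B = 156.032 + 16.5*TASmr - 0.196*pt + 0.354*bd*1000.0     # Table 4
    co2 = Rref*np.exp(B*(1.0/(T5ref - T50) - 1.0/(T5 - T50))) # g CO2 m-2 h-1
    return co2*10000.0/1000.0*24.0                            # kg CO2 ha-1 day-1

# at T5 = 10 degC the exponential term is 1 and the flux equals Rref, here ~56 kg ha-1 day-1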
\ndef z_distrib_decomposition(gwl, Qdistrib_beta = 0.96):\n \"\"\"\n Distribution Gale & Grigal Canadian Journal of Forest Research, 1987, 17(8): 829-834, 10.1139/x87-131 \\n\n Input:\n gwl m, negative or positive (absolute value)\n Output:\n share of actual decomposition from the potential\n \"\"\"\n qd = Qdistrib_beta**0.0 - Qdistrib_beta**(np.abs(gwl*100.))\n qd=np.where(qd > 0.1, qd, 0.1)\n return qd \n \ndef uptake(GisData, motti, simtime):\n\n nrows = int(((GisData['info'][0]).split()[1])) #shape c,r\n ncols = int(((GisData['info'][1]).split()[1]))\n\n #********** Species ************\n vol = GisData['vol'] # total volume in pixel\n #spe = np.zeros(np.shape(GisData['vol'])) # initialize species array \n spe = np.ones(np.shape(GisData['vol']))\n pine = GisData['p_vol']; spruce = GisData['s_vol']; birch = GisData['b_vol']\n \n pidx=np.greater_equal(pine, spruce+birch); sidx=np.less(pine, spruce+birch) # nutrient accumulation in birch is close to spruce, so birch is grouped with spruce\n spe[pidx]=1; spe[sidx]=2\n\n #******* Soil main class **********\n smc = GisData['smc'].copy()\n midx = np.where(GisData['smc']<2); smc[midx] = 1 #mineral\n pidx = np.where(GisData['smc']>1); smc[pidx] = 2 #peat\n oidx = np.where(GisData['smc']==4) #indices for open peatland\n\n #******* Soil fertility class, age, stand height\n sfc = GisData['sfc']\n sfc = sfc / GisData['sfc'] *3 # So far all fertility class 3\n age = GisData['age'].copy() #; age[age<1.]= 1.0\n height = GisData['hc'].copy() #; height[height<0.5]=0.5\n \n #**** the soil fertility & main classes present *********\n fC = np.unique(sfc[~np.isnan(sfc)])\n mC = np.unique(smc[~np.isnan(smc)])\n sp = np.unique(spe[~np.isnan(spe)])\n \n #******** creating parameter arrays *****************\n si = np.empty((ncols,nrows)); si[:]=np.nan #site index\n iage = np.empty((ncols,nrows)); iage[:]=np.nan #index age\n b1 = np.empty((ncols,nrows)); b1[:]=np.nan #parameters\n b2 = np.empty((ncols,nrows)); b2[:]=np.nan \n\n ysi = np.empty((ncols,nrows)); ysi[:]=np.nan #parameters for yield\n yiage = np.empty((ncols,nrows)); yiage[:]=np.nan\n yb1 = np.empty((ncols,nrows)); yb1[:]=np.nan\n yb2 = np.empty((ncols,nrows)); yb2[:]=np.nan\n #******* locating to map *************************\n lis = list(product(fC,mC,sp))\n for li in lis:\n s,minpe,spec = li \n speci = 'Pine' if spec==1 else 'Spruce'\n idx = np.where(np.logical_and(np.logical_and(sfc==s, smc==minpe), spe==spec))\n \n si[idx]=motti[s][minpe][speci]['hpara']['para'][0]\n iage[idx]=motti[s][minpe][speci]['hpara']['para'][1]\n b1[idx]=motti[s][minpe][speci]['hpara']['para'][2]\n b2[idx]=motti[s][minpe][speci]['hpara']['para'][3]\n\n ysi[idx]=motti[s][minpe][speci]['ypara']['para'][0]\n yiage[idx]=motti[s][minpe][speci]['ypara']['para'][1]\n yb1[idx]=motti[s][minpe][speci]['ypara']['para'][2]\n yb2[idx]=motti[s][minpe][speci]['ypara']['para'][3]\n\n #********** computing yield for the simulation time ******************** \n f = lambda age, si, iage, b1, b2: si*((1.0-np.exp(-1.0*b1*age))/(1.0-np.exp(-1.0*b1*iage)))**b2 \n hcalc = f(age, si, iage, b1,b2)\n rel = height/hcalc #relative height growth performance \n rel[rel>2.]=2.\n rel[rel<0.5]=0.5\n del si, iage, b1, b2\n \n #******* expected yield ************************\n dt = simtime\n y = (f(age+dt, ysi, yiage, yb1, yb2) - f(age, ysi, yiage, yb1, yb2))*rel\n del ysi, yiage, yb1, yb2\n \n
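# --- Illustrative sketch (editor's addition, not part of the original record) ---
# The lambda f above is a Chapman-Richards type growth curve scaled so that
# f(iage) = si, i.e. the stand reaches the site-index height si at the index age
# iage. The parameter values below are made up; in uptake() they come from the
# motti tables:
import numpy as np

def h_curve(age, si=24.0, iage=100.0, b1=0.025, b2=1.3):
    return si*((1.0 - np.exp(-b1*age))/(1.0 - np.exp(-b1*iage)))**b2

# h_curve(100.0) == 24.0 by construction; h_curve(50.0) gives the expected
# height at 50 yr, and the same form with the 'ypara' parameters gives volume.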
 \n #N assimilation functions by Raija Laiho 1997 (peat soils)\n nUp = np.empty((ncols,nrows)); nUp[:]=np.nan; pUp = nUp.copy() # initiate N and P uptake arrays\n idxP = np.where(spe==1) # indices for pine dominated pixels\n idxS = np.where(spe==2) # indices for spruce dominated pixels\n #idxB = np.where(spe==3) # indices for birch dominated pixels \n \n #********* Nutrient net uptake computation *********************** \n MarjoNut = lambda vol, lna, b, k: np.exp(lna + b*np.log(vol) + k)\n par = { # Palviainen & Finer, 2012 Eur J For Res 131:945-964, eq 2, Table 7\n 'N':{'pine':[1.856,0.631,0.050], 'spruce': [2.864,0.557,0.051], 'birch':[1.590,0.788,0.044]},\n 'P':{'pine':[-2.387,0.754,0.158], 'spruce':[-2.112,0.773,0.070], 'birch':[-3.051,1.114,0.102]}\n }\n #******** Nitrogen ************** \n lna,b,k=par['N']['pine'] # pine\n nUp[idxP]= MarjoNut(vol[idxP]+y[idxP], lna, b, k) - MarjoNut(vol[idxP], lna, b, k)\n lna,b,k=par['N']['spruce'] # spruce\n nUp[idxS]= MarjoNut(vol[idxS]+y[idxS], lna, b, k) - MarjoNut(vol[idxS], lna, b, k)\n #******** Phosphorus ************** \n lna,b,k=par['P']['pine'] # pine\n pUp[idxP]= MarjoNut(vol[idxP]+y[idxP], lna, b, k) - MarjoNut(vol[idxP], lna, b, k)\n lna,b,k=par['P']['spruce'] # spruce\n pUp[idxS]= MarjoNut(vol[idxS]+y[idxS], lna, b, k) - MarjoNut(vol[idxS], lna, b, k)\n \n #****** from leafarea back to leaf mass\n sla={'pine':5.54, 'spruce': 5.65, 'decid': 18.46} # m2/kg, Kellomäki et al. 2001 Atm. Env. \n llp = {'pine': 3.0, 'spruce':5.0} # leaf longevity, yrs\n lnutcont = {'N':{'pine': 1.0e-2, 'spruce': 1.0e-2}, # nitrogen content, kg kg-1\n 'P': {'pine': 1.0e-3, 'spruce': 1.0e-3}} \n retrans = {'N':{'pine': 0.5, 'spruce': 0.5}, # retranslocation\n 'P':{'pine': 0.5, 'spruce': 0.5}}\n \n #nUp_gv = 12.0*dt; pUp_gv = 2.0*dt # Palviainen doctoral thesis: N and P uptake by ground vegetation kg/yr\n #nUp_gv = 4.0*dt; pUp_gv = 0.6*dt \n \n # *****N uptake due to leaf turnover ****************\n #ATTN! Multiply with 1e4, not 1e3\n #nleafup = (GisData['LAI_pine'] / sla['pine']*1e3 * lnutcont['N']['pine'] * (1.-retrans['N']['pine']) / llp['pine'] \\\n # + GisData['LAI_spruce'] / sla['spruce']*1e3 * lnutcont['N']['spruce'] * (1.-retrans['N']['spruce']) / llp['spruce']) *dt\n nleafup = (GisData['LAI_pine'] / sla['pine']*1e4 * lnutcont['N']['pine'] * (1.-retrans['N']['pine']) / llp['pine'] \\\n + GisData['LAI_spruce'] / sla['spruce']*1e4 * lnutcont['N']['spruce'] * (1.-retrans['N']['spruce']) / llp['spruce']) *dt\n to_gross = 1.0\n Nup_tot = nUp*to_gross + nleafup \n\n # *****P uptake due to leaf turnover ************************* \n #pleafup = (GisData['LAI_pine'] / sla['pine']*1e3 * lnutcont['P']['pine'] * (1.-retrans['P']['pine']) / llp['pine'] \\\n # + GisData['LAI_spruce'] / sla['spruce']*1e3 * lnutcont['P']['spruce'] * (1.-retrans['P']['spruce']) / llp['spruce']) *dt\n pleafup = (GisData['LAI_pine'] / sla['pine']*1e4 * lnutcont['P']['pine'] * (1.-retrans['P']['pine']) / llp['pine'] \\\n + GisData['LAI_spruce'] / sla['spruce']*1e4 * lnutcont['P']['spruce'] * (1.-retrans['P']['spruce']) / llp['spruce']) *dt\n\n to_gross = 1.0 \n Pup_tot = pUp*to_gross + pleafup \n \n # open peatlands: insert small uptake for stand to avoid empty array \n Nup_tot[oidx] = 0.5 * simtime # 0.5 kg/yr/ha\n Pup_tot[oidx] = 0.05 * simtime # 0.05 kg/yr/ha\n y[oidx] = 0.1 #m3/ha/yr\n \n return Nup_tot , Pup_tot, y\n
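# --- Illustrative sketch (editor's addition, not part of the original record) ---
# Net stand uptake above is the difference of the Palviainen & Finer (2012)
# allometric nutrient-stock model evaluated at the end and start volumes. For
# a pine-dominated pixel:
import numpy as np

def n_uptake_pine(vol, yield_m3):
    lna, b, k = 1.856, 0.631, 0.050                   # par['N']['pine']
    stock = lambda v: np.exp(lna + b*np.log(v) + k)   # kg N ha-1 bound in the stand
    return stock(vol + yield_m3) - stock(vol)         # kg N ha-1 over the growth period

# e.g. n_uptake_pine(150.0, 30.0) -> N bound into 30 m3 ha-1 of new growth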
\ndef nutBalance(sto, up_s, up_gv, rel):\n sto = sto + rel - up_s - up_gv\n sto[sto<0.] = 0.0\n return sto\n \n\ndef nConc(cmask, D, Wliq, nsto):\n c=np.empty(np.shape(cmask))\n c[:,:] = np.NaN\n ix = np.isfinite(cmask)\n c[ix] = (nsto[ix]*1e6) / (Wliq[ix] * D[ix] * 1e4 *1e3) #mg/l\n return c\n \ndef ddgrowth(ddsm, meandd, yrs):\n #distributes nutrient uptake to days, scales with temp sum, \n #returns fraction of uptake for each day\n dd=np.array(ddsm / (yrs*meandd))\n uprate = np.gradient(dd)\n return uprate\n \ndef my_Raijan_ravinnef(vol, p1, p2, p3):\n #Raija's nutrient function; returns kg/ha \n return (p1*vol + p2*np.log(vol)**p3)*10.0\n
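# --- Illustrative note (editor's addition, not part of the original record) ---
# nConc() converts a nutrient storage (kg ha-1) dissolved in the soil water
# column (depth D m, volumetric water content Wliq) into a concentration:
#
#   c [mg/l] = (nsto [kg/ha] * 1e6 [mg/kg]) / (Wliq * D [m] * 1e4 [m2/ha] * 1e3 [l/m3])
#
# e.g. 10 kg ha-1 in a 0.4 m column at Wliq = 0.45:
#   10*1e6 / (0.45*0.4*1e4*1e3) ~ 5.6 mg/l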
\ndef vdataQuery(alue, alku, loppu, kysely, fname=None):\n \"\"\"\n Runs Vesidata html standard queries \n \n IN:\n alue - alueid (int)\n alku - '2015-05-25', (str)\n loppu -'2015-06-01', (str)\n kysely -'raw', 'wlevel', 'saa', (str)\n fname - filename for saving ascii-file\n OUT:\n dat - pd DataFrame; index is time and keys are from 1st line of query\n Samuli L. 25.4.2016; queries by Jukka Pöntinen & Anne Lehto\n \n usage example 1: https://taimi.in.metla.fi/cgi/bin/12.vesidata_haku.pl?id=3&alku=2016-01-25&loppu=2016-02-10&kysely=wlevel \n usage example 2: https://taimi.in.metla.fi/cgi/bin/12.vesidata_haku.pl?id=Porkkavaara&alku=2016-01-25&loppu=2016-02-10&kysely=raw\n usage example 3: https://taimi.in.metla.fi/cgi/bin/12.vesidata_haku.pl?id=Porkkavaara&alku=2016-01-25&loppu=2016-02-10&kysely=saa\n \n required parameters:\n id = site name or number, e.g. Kivipuro or 33 (Kivipuro's site number); \n for the 'annual' query group id=all can be given, in which case all sites are fetched\n \n alku = first day of the query period, e.g. 2016-01-25\n \n loppu = last day of the query period, e.g. 2016-02-10\n \n kysely: \n 'wlevel' = fetch the water level variables in a fixed order\n 'raw' = fetch, in addition to these, all variables of the 'raw' group\n 'saa' = fetch daily weather, i.e. precipitation and mean temperature \n 'annual' = fetch the computed annual results; the start date is the 1st day of the year, the end date e.g. the second day of the year\n 'craw'= fetch all variables of this group \n 'dload'= fetch all variables of this group \n 'roff'= fetch all variables of this group \n 'wquality'= fetch all variables of this group \n\n \"\"\"\n\n import urllib2, os, shutil\n import pandas as pd\n #addr='https://taimi.in.metla.fi/cgi/bin/12.vesidata_haku.pl?id=all&alku=2014-01-01&loppu=2014-10-25&kysely=annual' ALL ANNUAL VARIABLES\n \n #addr='https://taimi.in.metla.fi/cgi/bin/vesidata_haku.pl?id=Liuhapuro&alku=2015-05-25&loppu=2015-06-10&kysely=raw'\n \n addr='https://taimi.in.metla.fi/cgi/bin/vesidata_haku.pl?id=%s&alku=%s&loppu=%s&kysely=%s' %(str(alue), alku, loppu, kysely)\n ou='tmp.txt'\n \n f=urllib2.urlopen(addr) #open url, read to list and close\n r=f.read().split(\"\\n\")\n f.close()\n \n g=open(ou, 'w') #open tmp file, write, close\n g.writelines(\"%s\\n\" % item for item in r)\n g.close()\n \n #read 'tmp.txt' back to dataframe\n if kysely == 'annual': #annual query has different format; '==' compares values, 'is' would test identity\n dat=pd.read_csv(ou)\n f=dat['v_alue_metodi']\n yr=[]; alue=[]; mtd=[] \n for k in range(0, len(f)):\n yr.append(float(f[k].split('a')[0]))\n mtd.append(int(f[k].split('d')[1]))\n x=f[k].split('m')[0]\n alue.append(int(x.split('e')[1]))\n dat=dat.drop('v_alue_metodi',1)\n dat.insert(0,'alue_id', alue); dat.insert(1, 'vuosi',yr); dat.insert(2,'mtd', mtd)\n \n else: #...then the other queries\n dat=pd.read_csv(ou,index_col=0)\n dat.index=pd.to_datetime(dat.index) #convert to datetime\n \n \n if kysely == 'wlevel': #manipulate column names\n cols=list(dat.columns.values)\n h=[] \n for item in cols:\n h.append(item.split(\"=\")[1])\n dat.columns=h\n \n if fname is not None: #copy to fname, remove ou\n shutil.copy(ou, fname)\n os.remove(ou)\n\n return dat ", "sub_path": "spafhy_stand.py", "file_name": "spafhy_stand.py", "file_ext": "py", "file_size_in_byte": 51108, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.equal", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 54, 
"usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 164, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 169, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 170, "usage_type": "call"}, {"api_name": "pyproj.transform", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 241, 
"usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 432, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 465, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 470, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 486, "usage_type": "call"}, 
{"api_name": "numpy.where", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 489, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 490, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 498, "usage_type": "name"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 499, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 499, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 499, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 499, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 500, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 501, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 502, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 503, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 503, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 503, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 503, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 504, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 504, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 504, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 504, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 505, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 505, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 505, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 505, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 506, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 506, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 506, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 506, "usage_type": "call"}, {"api_name": "matplotlib.pylab.subplot", "line_number": 507, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 507, "usage_type": "name"}, {"api_name": "matplotlib.pylab.imshow", "line_number": 507, "usage_type": "call"}, {"api_name": "matplotlib.pylab.colorbar", "line_number": 507, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 518, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 523, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 524, "usage_type": "call"}, 
{"api_name": "numpy.empty", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 550, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 551, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 551, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 552, "usage_type": "attribute"}, {"api_name": "numpy.greater_equal", "line_number": 564, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.less_equal", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 596, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 623, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 624, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 635, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 635, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.less", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 644, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 654, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 654, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 655, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 655, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 656, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 656, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 659, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 659, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 660, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 660, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 661, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 661, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 662, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 662, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 664, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 664, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 665, 
"usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 665, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 666, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 666, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 667, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 667, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 669, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 673, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 673, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 686, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 700, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 701, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 702, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 706, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 706, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 765, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 765, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 766, "usage_type": "attribute"}, {"api_name": "numpy.isfinite", "line_number": 767, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 774, "usage_type": "call"}, {"api_name": "numpy.gradient", "line_number": 775, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 780, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 829, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 839, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 851, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 863, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 864, "usage_type": "call"}]} +{"seq_id": "8765570", "text": "#!/usr/bin/env python3\n# Braceless0 - https://www.braceless.org.\n# Copyright (c) 2019-2020 Mikael Egevig (mikael@egevig.org). All Rights Reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below.\n# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with the distribution.\n# * Neither the name of Mikael Egevig nor the names of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,\n# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT\n# SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# This is the BOOTSTRAP compiler for the Braceless language. It is not intended to be used for production, only for development\n# and testing of planned language features. The initial production quality compiler is planned to be developed using this\n# bootstrap compiler once the bootstrap compiler offers sufficient language features and a sufficiently stable code generator.\n#\n# Roadmap:\n#\n# 1. Create driver program (the program entry point).\n# 2. Create scanner component (the simple scanner).\n# 3. Create parser component (the recursive descent parser).\n# 4. Create checker component (the type-checker).\n# 5. Create writer component (the LLVM v8+ IR generator).\n# 6. Create invoker component (the Clang invoker).\n# 7. Add more language features by incrementally extending all of the above.\n#\n# Part of this project being a BOOTSTRAP compiler is that the generated language will fluctuate and contain bugs. The initial\n# project is to settle on the syntax (done long ago) and the semantics (stricter-than-Python-Python-semantics). The initial\n# versions of the compiler are intentionally made as a single-file Python v3.7+ project. I plan to split the compiler into\n# separate modules once the dust settles and the compiler begins to stabilize. Until then, there's only this one source file.\n#\n# If you spot errors or oversights, please post them to the Braceless0 project at https://github.com/archfrog/Braceless0.\n#\n# Source code conventions:\n#\n# 1. All indentation is done only using tabulator characters, each of which counts for four spaces.\n# 2. Private members are prefixed by two consecutive underscores (__).\n# 3. Protected members are prefixed by a single underscore (_).\n# 4. Public members and attributes are not prefixed by any underscores.\n# 5. Global variables are always intended to be used as global constants; i.e. no updates to them after they have been set up.\n# 6. Global constant names are always written in all uppercase.\n#\n# Notices:\n# 1. 
The construct 'assert(that.type is not None)', which appears many times, was made to satisfy MyPy's static checks.\n\n# Enable Python v3.x's type hints, as these are used extensively in order to allow MyPy to perform static checks on the code.\nfrom __future__ import annotations\nfrom typing import Any, cast, Dict, KeysView, List, Optional, Sequence, Union\n\nimport enum\nimport io\nimport os\nimport platform\nimport string\nimport subprocess\nimport sys\n\n\n#******************************************************** Global Constants *******************************************************\n\n# Whether a development (TEST) version or a production version (SHIP).\nMODE = \"TEST\"\nassert(MODE in [ \"SHIP\", \"TEST\" ])\n\n# The version of the Braceless v0.x bootstrap compiler (this script).\nVERSION = \"0.01.0001\"\nYEARSTR = \"2019-2020\"\nCOMPANY = \"Mikael Egevig\"\nWEBSITE = \"https://www.braceless.org\"\n\n# The end-of-line character, which is ALWAYS a Unix linefeed no matter the host platform (MacOS could change, so can Windows...).\nEOL = '\\n'\n\n# The space character, defined primarily for readability.\nSPC = ' '\n\n# The tabulator character, defined primarily for readability.\nTAB = '\\t'\n\n# The number of spaces per tabulator character. Used solely to position the virtual cursor correctly for error reporting.\nTABLEN = 4\n\n# The number of bits per boolean (it may turn out to be a bad idea to use only one bit for booleans, depending on LLVM).\nBITS_PER_BOOLEAN = 1\n\n# The number of bits per byte, defined primarily for readability.\nBITS_PER_BYTE = 8\n\n# The currently hardcoded number of bits per word. TODO: Eliminate 'BITS_PER_WORD'!\nBITS_PER_WORD = 64\n\n\n#**************************************************** Utility Classes and Functions **********************************************\n\ndef dd(value, die : bool = True) -> None:\n\t\"\"\"Dump-and-Die: Pretty print a value and then shut down the application (inspired by PHP and Laravel).\"\"\"\n\timport pprint\n\tpprint.PrettyPrinter(width=132).pprint(value)\n\n\tif die:\n\t\traise Error(\"Dump and Die\")\n\n\n# Dictionary that maps non-printable characters to a human-readable string.\nWHITESPACE_NAMES = {\n\tEOL : '(EOL)',\n\tSPC : '(SPC)',\n\tTAB : '(TAB)',\n}\n\n\n# Class used to pretty-print single characters which may be non-printable.\nclass CharacterFormatter(object):\n\n\tdef __init__(self, value : str) -> None:\n\t\tassert(len(value) == 1)\n\t\tself.__value = value\n\n\t# Formats a possibly non-printable character into human-readable text format.\n\tdef __str__(self) -> str:\n\t\tif self.__value not in string.whitespace:\n\t\t\treturn self.__value\n\n\t\tif self.__value in WHITESPACE_NAMES:\n\t\t\treturn WHITESPACE_NAMES[self.__value]\n\n\t\treturn \"(%04x)\" % ord(self.__value)\n\n\n# Class (structure) that represents and formats a single virtual cursor position in an input file.\nclass Cursor(object):\n\n\tdef __init__(self, file : str, line : int = 0, char : int = 0) -> None:\n\t\tself.file = file\n\t\tself.line = line\n\t\tself.char = char\n\n\t# Formats a virtual cursor position into a human-readable text string.\n\tdef __str__(self) -> str:\n\t\tresult = \"(%s\" % self.file\n\t\tif self.line:\n\t\t\tresult += \":%u\" % self.line\n\t\t\tif self.char:\n\t\t\t\tresult += \",%u\" % self.char\n\t\tresult += \")\"\n\t\treturn result\n\n\n# Exception base class for all exceptions defined by this program.\nclass Error(Exception):\n\n\tdef __init__(self, text : str) -> None:\n\t\tException.__init__(self)\n\t\tself.__text = 
text\n\n\t@property\n\tdef text(self) -> str:\n\t\treturn self.__text\n\n\tdef __str__(self) -> str:\n\t\treturn \"Error: %s\" % self.__text\n\n\n# Prints the specified text string and exits the program with an exit code of 1.\nclass ShowTextAndExitProgram(Error):\n\n\tdef __init__(self, text : str) -> None:\n\t\tError.__init__(self, text)\n\n\tdef __str__(self) -> str:\n\t\treturn self.text\n\n\n# Exception class used to report internal errors in the compiler itself.\nclass InternalError(Error):\n\n\tdef __init__(self, text : str) -> None:\n\t\tError.__init__(self, text)\n\n\tdef __str__(self) -> str:\n\t\treturn \"Internal Error: %s\" % self.text\n\n\n# Exception class used to transmit information about the location of errors found in the scanner and/or parser.\nclass CursorError(Error):\n\n\tdef __init__(self, cursor : Cursor, text : str) -> None:\n\t\tError.__init__(self, text)\n\t\tself.__cursor = Cursor(cursor.file, cursor.line, cursor.char)\n\n\t# Returns the virtual file position for this error.\n\t@property\n\tdef cursor(self) -> Cursor:\n\t\treturn self.__cursor\n\n\t# Returns the file name part of the virtual file position for this error.\n\t@property\n\tdef file(self) -> str:\n\t\treturn self.__cursor.file\n\n\t# Returns the line number part of the virtual file position for this error.\n\t@property\n\tdef line(self) -> int:\n\t\treturn self.__cursor.line\n\n\t# Returns the character index part of the virtual file position for this error.\n\t@property\n\tdef char(self) -> int:\n\t\treturn self.__cursor.char\n\n\tdef __str__(self) -> str:\n\t\traise InternalError(\"Derived class must override CursorError.__str__ method\")\n\n\n# Simple text stream writer that supports indenting/dedenting and always writes using Unix line ending format.\nclass TextWriter(object):\n\n\t# Takes a file name or a Python text stream such as 'sys.stdout' as its output stream.\n\tdef __init__(self, file : Union[str, io.TextIOWrapper], tabsize : int = 4) -> None:\n\t\tself.__file = file\n\t\tself.__size = tabsize\n\t\tif isinstance(file, str):\n\t\t\tself.__stream = open(file, 'wb')\n\t\telse:\n\t\t\tself.__stream = file.buffer\n\t\tself.__level = 0\n\t\tself.__start = True\n\n\t# Closes the output stream.\n\tdef close(self) -> None:\n\t\tif isinstance(self.__file, str):\n\t\t\tself.__stream.close()\n\n\t# Dedents if and only if at the start of a line and the nesting level is greater than or equal to one.\n\tdef dedent(self, text : str = None) -> None:\n\t\tassert(self.__start)\n\n\t\tassert(self.__level >= 1)\n\t\tself.__level -= 1\n\n\t\tif text:\n\t\t\tself.write(text)\n\t\t\tself.flush()\n\n\t# Indents the output stream starting at the next line.\n\tdef indent(self, text : str = None) -> None:\n\t\tif text:\n\t\t\tself.flush(text)\n\n\t\tself.__level += 1\n\n\t# Undents, outputs text, and then re-indents.\n\tdef undent(self, text : str = None) -> None:\n\t\tself.dedent()\n\t\tself.flush(text)\n\t\tself.indent()\n\n\t# Writes a UTF-8 string as ASCII to the output file, while taking indentation into consideration.\n\tdef write(self, text : str) -> None:\n\t\tif self.__start:\n\t\t\tprefix = SPC * (self.__size * self.__level)\n\t\t\tself.__stream.write(prefix.encode('ascii'))\n\t\t\tself.__start = False\n\n\t\tself.__stream.write(text.encode('ascii'))\n\n\t# Writes an end-of-line character sequence to the output file (\"flushes the current line\").\n\tdef flush(self, text : str = None) -> None:\n\t\tif text:\n\t\t\tself.write(text)\n\n\t\tself.__stream.write(b'\\n')\n\t\tself.__start = 
True\n\n\n#************************************************************* Scanner/Lexer *****************************************************\n\n# Class that defines an enumeration of all token kinds known to the scanner/lexer.\nclass TokenKind(enum.Enum):\n\tBraceBegin = enum.auto()\n\tBraceClose = enum.auto()\n\tBracketBegin = enum.auto()\n\tBracketClose = enum.auto()\n\tCharacter = enum.auto()\n\tColon = enum.auto()\n\tComma = enum.auto()\n\tComment = enum.auto()\n\tDedent = enum.auto()\n\tDot = enum.auto()\n\tEndOfFile = enum.auto()\n\tEndOfLine = enum.auto()\n\tIndent = enum.auto()\n\tKeyword_And = enum.auto()\n\tKeyword_Boolean = enum.auto()\n\tKeyword_Call = enum.auto()\n\tKeyword_Cardinal1 = enum.auto()\n\tKeyword_Cardinal2 = enum.auto()\n\tKeyword_Cardinal4 = enum.auto()\n\tKeyword_Cardinal8 = enum.auto()\n\tKeyword_Class = enum.auto()\n\tKeyword_Create = enum.auto()\n\tKeyword_Elif = enum.auto()\n\tKeyword_Else = enum.auto()\n\tKeyword_Entry = enum.auto()\n\tKeyword_False = enum.auto()\n\tKeyword_Field = enum.auto()\n\tKeyword_Function = enum.auto()\n\tKeyword_Guard = enum.auto()\n\tKeyword_If = enum.auto()\n\tKeyword_In = enum.auto()\n\tKeyword_Io = enum.auto()\n\tKeyword_Is = enum.auto()\n\tKeyword_Let = enum.auto()\n\tKeyword_Module = enum.auto()\n\tKeyword_New = enum.auto()\n\tKeyword_Not = enum.auto()\n\tKeyword_Or = enum.auto()\n\tKeyword_Out = enum.auto()\n\tKeyword_Return = enum.auto()\n\tKeyword_Then = enum.auto()\n\tKeyword_Throw = enum.auto()\n\tKeyword_True = enum.auto()\n\tKeyword_Word = enum.auto()\n\tKeyword_Xor = enum.auto()\n\tLiteral_Cardinal = enum.auto()\n\tLiteral_Text = enum.auto()\n\tName = enum.auto()\n\tOperator_Asterisk = enum.auto()\n\tOperator_AsteriskEqual = enum.auto()\n\tOperator_ColonEqual = enum.auto()\n\tOperator_Difference = enum.auto()\n\tOperator_Equal = enum.auto()\n\tOperator_Exclamation = enum.auto()\n\tOperator_GreaterEqual = enum.auto()\n\tOperator_GreaterThan = enum.auto()\n\tOperator_LessEqual = enum.auto()\n\tOperator_LessThan = enum.auto()\n\tOperator_Minus = enum.auto()\n\tOperator_MinusEqual = enum.auto()\n\tOperator_Percent = enum.auto()\n\tOperator_PercentEqual = enum.auto()\n\tOperator_Plus = enum.auto()\n\tOperator_PlusEqual = enum.auto()\n\tOperator_QuestionEqual = enum.auto()\n\tOperator_Slash = enum.auto()\n\tOperator_SlashEqual = enum.auto()\n\tParenthesisBegin = enum.auto()\n\tParenthesisClose = enum.auto()\n\tSpace = enum.auto()\n\tText = enum.auto()\n\tVerbatim = enum.auto()\n\n\n# Class that represents a single token in an input file.\nclass Token(object):\n\n\tdef __init__(self, cursor, token_kind : TokenKind, text : str) -> None:\n\t\tself.cursor = Cursor(cursor.file, cursor.line, cursor.char)\n\t\tself.kind = token_kind\n\t\tself.text = text\n\n\tdef __str__(self) -> str:\n\t\tif len(self.text) == 1:\n\t\t\ttext = str(CharacterFormatter(self.text))\n\t\telse:\n\t\t\ttext = self.text\n\t\treturn \"%s %s %s\" % (self.cursor, str(self.kind), text)\n\n\n# Dictionary of all known operators in the Braceless0 language.\nOPERATOR_TOKENS = {\n\t'-' : TokenKind.Operator_Minus,\n\t'!' 
: TokenKind.Operator_Exclamation,\n\t'%' : TokenKind.Operator_Percent,\n\t'%=' : TokenKind.Operator_PercentEqual,\n\t'*' : TokenKind.Operator_Asterisk,\n\t'*=' : TokenKind.Operator_AsteriskEqual,\n\t'/' : TokenKind.Operator_Slash,\n\t'/=' : TokenKind.Operator_SlashEqual,\n\t':' : TokenKind.Colon,\n\t':=' : TokenKind.Operator_ColonEqual,\n\t'+' : TokenKind.Operator_Plus,\n\t'+=' : TokenKind.Operator_PlusEqual,\n\t'<' : TokenKind.Operator_LessThan,\n\t'<=' : TokenKind.Operator_LessEqual,\n\t'<>' : TokenKind.Operator_Difference,\n\t'=' : TokenKind.Operator_Equal,\n\t'-=' : TokenKind.Operator_MinusEqual,\n\t'>' : TokenKind.Operator_GreaterThan,\n\t'>=' : TokenKind.Operator_GreaterEqual,\n\t'?=' : TokenKind.Operator_QuestionEqual,\n}\n\n\n# The complete list of known keywords.\nKEYWORDS = {\n\t'and' : TokenKind.Keyword_And,\n\t'boolean' : TokenKind.Keyword_Boolean,\n\t'call' : TokenKind.Keyword_Call,\n\t'cardinal1' : TokenKind.Keyword_Cardinal1,\n\t'cardinal2' : TokenKind.Keyword_Cardinal2,\n\t'cardinal4' : TokenKind.Keyword_Cardinal4,\n\t'cardinal8' : TokenKind.Keyword_Cardinal8,\n\t'class' : TokenKind.Keyword_Class,\n\t'create' : TokenKind.Keyword_Create,\n\t'elif' : TokenKind.Keyword_Elif,\n\t'else' : TokenKind.Keyword_Else,\n\t'entry' : TokenKind.Keyword_Entry,\n\t'guard' : TokenKind.Keyword_Guard,\n\t'false' : TokenKind.Keyword_False,\n\t'field' : TokenKind.Keyword_Field,\n\t'function' : TokenKind.Keyword_Function,\n\t'if' : TokenKind.Keyword_If,\n\t'in' : TokenKind.Keyword_In,\n\t'io' : TokenKind.Keyword_Io,\n\t'is' : TokenKind.Keyword_Is,\n\t'let' : TokenKind.Keyword_Let,\n\t'module' : TokenKind.Keyword_Module,\n\t'new' : TokenKind.Keyword_New,\n\t'not' : TokenKind.Keyword_Not,\n\t'or' : TokenKind.Keyword_Or,\n\t'out' : TokenKind.Keyword_Out,\n\t'return' : TokenKind.Keyword_Return,\n\t'then' : TokenKind.Keyword_Then,\n\t'throw' : TokenKind.Keyword_Throw,\n\t'true' : TokenKind.Keyword_True,\n\t'xor' : TokenKind.Keyword_Xor,\n}\n\n\n# Class that defines an enumeration of all the states in the scanner/lexer.\nclass StateKind(enum.Enum):\n\n\tCardinal = enum.auto()\n\tCharacter1 = enum.auto()\n\tCharacter2 = enum.auto()\n\tCharacter3 = enum.auto()\n\tComment = enum.auto()\n\tDecimal = enum.auto()\n\tEndOfFile = enum.auto()\n\tEndOfLine = enum.auto()\n\tKeyword = enum.auto()\n\tName = enum.auto()\n\tOperator = enum.auto()\n\tSingleton = enum.auto()\n\tSpace = enum.auto()\n\tText1 = enum.auto()\n\tText2 = enum.auto()\n\tVerbatim1 = enum.auto()\n\tVerbatim2 = enum.auto()\n\n\n# Dictionary that maps the FIRST character of a new token to its corresponding state (it is extended below).\nSTART = {\n\t'\"' : StateKind.Text1,\n\t\"'\" : StateKind.Character1,\n\t'`' : StateKind.Verbatim1,\n\t'#' : StateKind.Comment,\n}\n\n\n# The list of characters that are valid in Braceless0 operators.\nOPERATORS = \"-%*/:^~+!<>=?\"\nfor value in OPERATORS:\n\tSTART[value] = StateKind.Operator\nfor value in string.digits:\n\tSTART[value] = StateKind.Cardinal\nfor value in string.ascii_letters:\n\tSTART[value] = StateKind.Name\n\n\n# Dictionary that maps single-character tokens to their corresponding states (it is extended below).\nSINGLETONS = {\n\tEOL : TokenKind.EndOfLine,\n\tSPC : TokenKind.Space,\n\t'(' : TokenKind.ParenthesisBegin,\n\t')' : TokenKind.ParenthesisClose,\n\t'[' : TokenKind.BracketBegin,\n\t']' : TokenKind.BracketClose,\n\t'{' : TokenKind.BraceBegin,\n\t'}' : TokenKind.BraceClose,\n\t'.' 
: TokenKind.Dot,\n\t',' : TokenKind.Comma\n}\nfor value in SINGLETONS.keys():\n\tSTART[value] = StateKind.Singleton\n\n\n# Used to decide if the second+ character belongs to a name or not.\nALNUM = string.ascii_letters + string.digits\n\n\n# Exception class used for reporting scanner/lexer errors.\nclass LexerError(CursorError):\n\n\tdef __init__(self, cursor : Cursor, text : str) -> None:\n\t\tCursorError.__init__(self, cursor, text)\n\n\tdef __str__(self) -> str:\n\t\treturn \"%s Error: %s\" % ( self.cursor, self.text )\n\n\n# Class that implements the lexer/scanner part of the compiler.\nclass Lexer(object):\n\n\t# Scans the specified text read from the specified file into a sequence of tokens.\n\tdef scan(self, file : str, text : str) -> List[Token]:\n\t\tcursor = Cursor(file, 1, 1)\n\t\tif len(text) > 0 and text[-1] != EOL:\n\t\t\traise LexerError(cursor, \"Last line not terminated properly by a newline character\")\n\n\t\ttokens = []\n\n\t\tkind = None\n\n\t\tindex = 0\n\t\tafter = len(text)\n\n\t\tstart = None\n\t\tstate = None\n\t\tslice = None\n\n\t\tlevel = 0\n\t\tstack = [ 0 ]\n\t\tblank = 0\n\n\t\tfirst = True\n\t\twhile index < after:\n\t\t\t# Process indentation.\n\t\t\tif first:\n\t\t\t\tif text[index] == EOL:\n\t\t\t\t\t# Count the number of blank lines in order to insert the dedent tokens in the proper place in the token list.\n\t\t\t\t\tindex += 1\n\t\t\t\t\tblank += 1\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Count number of indents (only tabs count as indentation).\n\t\t\t\tlevel = 0\n\t\t\t\twhile text[index] == TAB:\n\t\t\t\t\tindex += 1\n\t\t\t\t\tlevel += 1\n\t\t\t\t\tcursor.char += TABLEN\n\n\t\t\t\t# Output dedent tokens.\n\t\t\t\twhile level < len(stack) - 1:\n\t\t\t\t\tstack.pop()\n\t\t\t\t\ttokens.append(Token(Cursor(cursor.file, cursor.line, 1), TokenKind.Dedent, \"(dedent)\"))\n\n\t\t\t\t# Output empty lines AFTER the dedent tokens.\n\t\t\t\twhile blank > 0:\n\t\t\t\t\tblank -= 1\n\t\t\t\t\ttokens.append(Token(cursor, TokenKind.EndOfLine, EOL))\n\t\t\t\t\tcursor.line += 1\n\n\t\t\t\t# Allow only a single indent at a time: This is very different from the sloppy Python indentation rules.\n\t\t\t\tif level > len(stack):\n\t\t\t\t\traise ParserError(cursor, \"Source line indented multiple times\")\n\t\t\t\telif level == len(stack):\n\t\t\t\t\tstack.append(level)\n\t\t\t\t\ttokens.append(Token(Cursor(cursor.file, cursor.line, cursor.char - TABLEN), TokenKind.Indent, \"(indent)\"))\n\n\t\t\t\tfirst = False\n\n\t\t\t# Fetch the current character.\n\t\t\tvalue = text[index]\n\n\t\t\t# Make index point to the NEXT character (the lookahead).\n\t\t\tindex += 1\n\n\t\t\tif not state:\n\t\t\t\tstate = START.get(value)\n\t\t\t\tif not state:\n\t\t\t\t\traise LexerError(cursor, \"Unexpected character: %s\" % CharacterFormatter(value))\n\t\t\t\tstart = index - 1\n\n\t\t\tif state == StateKind.Singleton:\n\t\t\t\tkind = SINGLETONS[value]\n\t\t\telif state == StateKind.Name:\n\t\t\t\tif text[index] not in ALNUM:\n\t\t\t\t\tslice = text[start:index]\n\t\t\t\t\tkind = KEYWORDS.get(slice)\n\t\t\t\t\tif not kind:\n\t\t\t\t\t\tkind = TokenKind.Name\n\t\t\telif state == StateKind.Comment:\n\t\t\t\tif text[index] == EOL:\n\t\t\t\t\tkind = TokenKind.Comment\n\t\t\telif state == StateKind.Text1:\n\t\t\t\tstate = StateKind.Text2\n\t\t\telif state == StateKind.Text2:\n\t\t\t\tif value == '\"':\n\t\t\t\t\tkind = TokenKind.Text\n\t\t\t\t\tslice = text[start + 1:index - 1]\n\t\t\telif state == StateKind.Cardinal:\n\t\t\t\tif text[index] not in string.digits:\n\t\t\t\t\tkind = 
TokenKind.Literal_Cardinal\n\t\t\telif state == StateKind.Character1:\n\t\t\t\tstate = StateKind.Character2\n\t\t\telif state == StateKind.Character2:\n\t\t\t\tif value == \"'\":\n\t\t\t\t\traise LexerError(cursor, \"Character literal cannot embed apostrophe (')\")\n\t\t\t\tstate = StateKind.Character3\n\t\t\telif state == StateKind.Character3:\n\t\t\t\tif value != \"'\":\n\t\t\t\t\traise LexerError(cursor, \"Character literal not terminated correctly\")\n\t\t\t\tkind = TokenKind.Character\n\t\t\t\tslice = text[start + 1:index - 1]\n\t\t\telif state == StateKind.Operator:\n\t\t\t\tif text[index] not in OPERATORS:\n\t\t\t\t\tslice = text[start:index]\n\t\t\t\t\tkind = OPERATOR_TOKENS.get(slice)\n\t\t\t\t\tif not kind:\n\t\t\t\t\t\traise LexerError(cursor, \"Invalid operator: \" + slice)\n\t\t\telif state == StateKind.Verbatim1:\n\t\t\t\tstate = StateKind.Verbatim2\n\t\t\telif state == StateKind.Verbatim2:\n\t\t\t\tif value == \"`\":\n\t\t\t\t\tslice = text[start + 1:index - 1]\n\t\t\t\t\tkind = TokenKind.Verbatim\n\t\t\telse:\n\t\t\t\traise LexerError(cursor, \"Internal error: \" + str(state))\n\n\t\t\tif kind:\n\t\t\t\tif not slice:\n\t\t\t\t\tslice = text[start:index]\n\n\t\t\t\t# Output the assembled token.\n\t\t\t\ttokens.append(Token(cursor, kind, slice))\n\n\t\t\t\tcursor.char += index - start\n\n\t\t\t\tfirst = False\n\t\t\t\tkind = None\n\t\t\t\tstate = None\n\t\t\t\tslice = None\n\n\t\t\t# Keep track of the line and character numbers of the token.\n\t\t\tif value == EOL:\n\t\t\t\tif cursor.char - 1 >= 132:\n\t\t\t\t\traise ParserError(\n\t\t\t\t\t\tCursor(cursor.file, cursor.line, cursor.char - 1),\n\t\t\t\t\t\t\"Source line too long - it must be less than 132 characters long\"\n\t\t\t\t\t)\n\n\t\t\t\tcursor.line += 1\n\t\t\t\tcursor.char = 1\n\t\t\t\tfirst = True\n\t\t\telif value == TAB:\n\t\t\t\tcursor.char += TABLEN - 1\n\n\t\tif state:\n\t\t\traise LexerError(cursor, \"Unexpected end of file in token\")\n\n\t\t# Output trailing dedent tokens.\n\t\twhile len(stack) > 1:\n\t\t\tstack.pop()\n\t\t\ttokens.append(Token(cursor, TokenKind.Dedent, \"(dedent)\"))\n\n\t\t# Output empty lines AFTER the dedent tokens.\n\t\twhile blank > 0:\n\t\t\tblank -= 1\n\t\t\ttokens.append(Token(cursor, TokenKind.EndOfLine, EOL))\n\t\t\tcursor.line += 1\n\n\t\ttokens.append(Token(cursor, TokenKind.EndOfFile, \"(eof)\"))\n\n\t\treturn tokens\n\n\n#*************************************************** Abstract Syntax Tree Nodes **************************************************\n\n# The kinds of lines that the parser recognizes.\nclass BlockKind(enum.Enum):\n\n\t# An entry, in this context, is a single-line entry that spans only a single source line.\n\tEntry = enum.auto()\n\t# A block, in this context, is a block that starts with a non-empty line and allows at most one blank line between inner lines.\n\tBlock = enum.auto()\n\t# A group, in this context, is a block that starts with an empty line and requires two blank lines between inner blocks.\n\tGroup = enum.auto()\n\n\n# The complete list of the operators known to the compiler.\nclass OperatorKind(enum.Enum):\n\n\tAbsoluteSelector = enum.auto()\n\tAlgebraic_Addition = enum.auto()\n\tAlgebraic_Division = enum.auto()\n\tAlgebraic_Multiplication = enum.auto()\n\tAlgebraic_Remainder = enum.auto()\n\tAlgebraic_Subtraction = enum.auto()\n\tConditional_IfThenElse = enum.auto()\n\tLogical_And = enum.auto()\n\tLogical_Not = enum.auto()\n\tLogical_Or = enum.auto()\n\tLogical_Xor = enum.auto()\n\tMemberSelector = enum.auto()\n\tRelational_Difference = 
enum.auto()\n\tRelational_Equality = enum.auto()\n\tRelational_GreaterEqual = enum.auto()\n\tRelational_GreaterThan = enum.auto()\n\tRelational_LessEqual = enum.auto()\n\tRelational_LessThan = enum.auto()\n\tRelativeSelector = enum.auto()\n\n\n# Maps a token kind to its encoded (mangled) form.\nOPERATOR_ENCODINGS = {\n\tOperatorKind.Algebraic_Addition : 'add',\n\tOperatorKind.Algebraic_Division : 'div',\n\tOperatorKind.Algebraic_Multiplication : 'mul',\n\tOperatorKind.Algebraic_Remainder : 'rem',\n\tOperatorKind.Algebraic_Subtraction : 'sub',\n\tOperatorKind.Conditional_IfThenElse : 'if',\n\tOperatorKind.Logical_And : 'and',\n\tOperatorKind.Logical_Not : 'not',\n\tOperatorKind.Logical_Or : 'ior',\n\tOperatorKind.Logical_Xor : 'xor',\n\tOperatorKind.Relational_Difference : 'ne',\n\tOperatorKind.Relational_Equality : 'eq',\n\tOperatorKind.Relational_GreaterEqual : 'ge',\n\tOperatorKind.Relational_GreaterThan : 'gt',\n\tOperatorKind.Relational_LessEqual : 'le',\n\tOperatorKind.Relational_LessThan : 'lt',\n}\n\n\n# Maps token text to an operator kind.\nOPERATOR_KINDS = {\n\t'-' : OperatorKind.Algebraic_Subtraction,\n\t'%' : OperatorKind.Algebraic_Remainder,\n\t'*' : OperatorKind.Algebraic_Multiplication,\n\t'/' : OperatorKind.Algebraic_Division,\n\t'?' : OperatorKind.Conditional_IfThenElse,\n\t'+' : OperatorKind.Algebraic_Addition,\n\t'<' : OperatorKind.Relational_LessThan,\n\t'<=' : OperatorKind.Relational_LessEqual,\n\t'<>' : OperatorKind.Relational_Difference,\n\t'=' : OperatorKind.Relational_Equality,\n\t'>' : OperatorKind.Relational_GreaterThan,\n\t'>=' : OperatorKind.Relational_GreaterEqual,\n\t'and' : OperatorKind.Logical_And,\n\t'not' : OperatorKind.Logical_Not,\n\t'or' : OperatorKind.Logical_Or,\n\t'xor' : OperatorKind.Logical_Xor,\n}\n\n# Maps operator kinds to a token.\nOPERATOR_KIND_TO_TOKEN : Dict[OperatorKind, str]= {}\nfor token_string in OPERATOR_KINDS:\n\toperator = OPERATOR_KINDS[token_string]\n\tOPERATOR_KIND_TO_TOKEN[operator] = token_string\n\n\n# The complete list of types supported by the compiler (some are still missing or work-in-progress).\nclass TypeKind(enum.Enum):\n\n\tNoneType = enum.auto()\n\tCardinal = enum.auto()\n\tBoolean = enum.auto()\n\tCharacter = enum.auto()\n\tText = enum.auto()\n\tVoid = enum.auto()\n\n\n#-------------------------------------------------------- Basic Nodes ------------------------------------------------------------\n\n# The base abstract syntax tree (AST) node.\nclass Node(object):\n\n\tdef __init__(self, cursor : Cursor) -> None:\n\t\tself.__id : int = 0\n\t\tself.__above : Optional[Node] = None\n\t\tself.__cursor : Cursor = cursor\n\t\tself.__type : Optional[Node] = None\n\n\t@property\n\tdef above(self) -> Union[Node, None]:\n\t\treturn self.__above\n\n\t@above.setter\n\tdef above(self, value : Node) -> None:\n\t\tif self.__above:\n\t\t\traise InternalError(\"Data member 'above' can only be assigned once\")\n\t\tself.__above = value\n\n\t@property\n\tdef block_kind(self) -> BlockKind:\n\t\t\"\"\"Default to single-line entries that do not open up an inner scope.\"\"\"\n\t\treturn BlockKind.Entry\n\n\t@property\n\tdef cursor(self) -> Cursor:\n\t\treturn self.__cursor\n\n\t@property\n\tdef id(self) -> int:\n\t\treturn self.__id\n\n\t@id.setter\n\tdef id(self, value : int) -> None:\n\t\tassert(value >= 1)\n\t\tif self.__id:\n\t\t\traise InternalError(\"Node.id can only be assigned once: %s\" % self)\n\t\tself.__id = value\n\n\t@property\n\tdef type(self) -> Optional[Node]:\n\t\treturn self.__type\n\n\t@type.setter\n\tdef type(self, 
value : Node) -> None:\n\t\tif self.__type:\n\t\t\traise InternalError(\"Node.type can only be assigned once: %s\" % self)\n\t\tself.__type = value\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\tif not self.__type:\n\t\t\traise InternalError(\"Type not resolved properly\")\n\t\treturn self.__type.type_kind\n\n\t# Search upwards for a class that matches the given name. Starts at the 'self' pointer.\n\tdef findover(self, class_name : str) -> Optional[Node]:\n\t\tnext : Optional[Node] = self\n\t\twhile next != None:\n\t\t\tif next.__class__.__name__ == class_name:\n\t\t\t\treturn next\n\n\t\t\tassert(next is not None)\n\t\t\tnext = next.above\n\t\treturn None\n\n\tdef __str__(self) -> str:\n\t\tresult = self.__class__.__name__\n\t\tif self.__id:\n\t\t\tresult += \"@%04u\" % self.__id\n\t\treturn result\n\n\tdef visit(self, that, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\n# An AST node representing a subtree (non-leaf) node in the tree.\nclass Tree(Node):\n\n\tdef __init__(self, cursor : Cursor, below : List[Node]) -> None:\n\t\tNode.__init__(self, cursor)\n\t\tself.__below = below\n\t\tfor child in self.__below:\n\t\t\tchild.above = self\n\n\t@property\n\tdef below(self) -> List[Node]:\n\t\treturn self.__below\n\n\n#-------------------------------------------------------- Statement Nodes --------------------------------------------------------\n\n# Base class for all statements.\nclass Statement(Node):\n\n\tdef __init__(self, cursor : Cursor) -> None:\n\t\tNode.__init__(self, cursor)\n\n\n# A statement that consists of a list of statements, like if-then-else and try-catch.\nclass Statements(Statement):\n\n\tdef __init__(self, cursor : Cursor, below : List[Node]) -> None:\n\t\tStatement.__init__(self, cursor)\n\t\tself.__below = below\n\t\tfor child in self.__below:\n\t\t\tchild.above = self\n\n\t@property\n\tdef below(self) -> List[Node]:\n\t\treturn self.__below\n\n\n# AST node for the 'return' statement with or without a return expression.\nclass ReturnStatement(Statement):\n\n\tdef __init__(self, cursor : Cursor, expression : Node) -> None:\n\t\tStatement.__init__(self, cursor)\n\t\tself.__expression = expression\n\t\tself.__expression.above = self\n\n\t@property\n\tdef expression(self) -> Node:\n\t\treturn self.__expression\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn self.__expression.type_kind\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitReturnStatement(self, argument)\n\n\n# AST node for the 'call' statement.\n# NOTE: This is virtually identical to 'CallExpression', but the latter is in the Expression hierarchy of nodes so we can't share.\nclass CallStatement(Statement):\n\n\tdef __init__(self, cursor : Cursor, name : Node, arguments : List[Node]) -> None:\n\t\tStatement.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.__arguments = arguments\n\t\tfor argument in self.__arguments:\n\t\t\targument.above = self\n\n\t@property\n\tdef arguments(self) -> List[Node]:\n\t\treturn self.__arguments\n\n\t@property\n\tdef name(self):\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitCallStatement(self, argument)\n\n\n# AST node for the 'create' statement.\nclass CreateStatement(Statement):\n\n\tdef __init__(self, cursor : Cursor, name : Name, type : Node, expression : Node) -> None:\n\t\tStatement.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.type = 
type\n\t\tself.type.above = self\n\t\tself.__expression = expression\n\t\tself.__expression.above = self\n\n\t@property\n\tdef expression(self) -> Node:\n\t\treturn self.__expression\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitCreateStatement(self, argument)\n\n\n# AST node for the 'if'/'elif'/'else' statement.\nclass IfElifElseStatement(Statements):\n\n\tdef __init__(self, cursor : Cursor, below : List[Node]) -> None:\n\t\tStatements.__init__(self, cursor, below)\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitIfElifElseStatement(self, argument)\n\n\n# AST node for an 'if' statement.\nclass IfStatement(Statements):\n\n\tdef __init__(self, cursor : Cursor, condition : Node, below : List[Node]) -> None:\n\t\tStatements.__init__(self, cursor, below)\n\n\t\tself.__condition = condition\n\t\tself.__condition.above = self\n\n\t@property\n\tdef condition(self) -> Node:\n\t\treturn self.__condition\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitIfStatement(self, argument)\n\n\n# AST node for an 'elif' statement.\nclass ElifStatement(IfStatement):\n\n\tdef __init__(self, cursor : Cursor, condition : Node, below : List[Node]) -> None:\n\t\tIfStatement.__init__(self, cursor, condition, below)\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitElifStatement(self, argument)\n\n\n# AST node for an 'else' statement.\nclass ElseStatement(Statements):\n\n\tdef __init__(self, cursor : Cursor, below : List[Node]) -> None:\n\t\tStatements.__init__(self, cursor, below)\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitElseStatement(self, argument)\n\n\n# The complete list of all assignment kinds supported by the compiler (x y= z, where y is in [ :, *, +, -, ... 
]).\nclass AssignmentKind(enum.Enum):\n\n\tAddition = enum.auto()\n\tDivision = enum.auto()\n\tIdentity = enum.auto()\n\tMultiplication = enum.auto()\n\tOptional = enum.auto()\n\tRemainder = enum.auto()\n\tSubtraction = enum.auto()\n\n\nASSIGNMENT_KIND_TO_TOKEN : Dict[AssignmentKind, str] = {\n\tAssignmentKind.Addition : '+=',\n\tAssignmentKind.Division : '/=',\n\tAssignmentKind.Identity : ':=',\n\tAssignmentKind.Multiplication : '*=',\n\tAssignmentKind.Optional : '?=',\n\tAssignmentKind.Remainder : '%=',\n\tAssignmentKind.Subtraction : '-=',\n}\n\n\n# AST node for a 'let' statement.\nclass LetStatement(Statement):\n\n\tdef __init__(self, cursor : Cursor, kind : AssignmentKind, name : Node, expression : Node) -> None:\n\t\tNode.__init__(self, cursor)\n\n\t\tself.__kind = kind\n\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t\tself.__expression = expression\n\t\tself.__expression.above = self\n\n\t@property\n\tdef assignment_kind(self) -> AssignmentKind:\n\t\treturn self.__kind\n\n\t@property\n\tdef expression(self) -> Node:\n\t\treturn self.__expression\n\n\t@property\n\tdef name(self) -> Node:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitLetStatement(self, argument)\n\n\n# A synthetic statement generated for the value assignments that optionally appear in a constructor invocation.\nclass SetStatement(Statement):\n\n\tdef __init__(self, cursor : Cursor, kind : AssignmentKind, name : Node, expression : Node) -> None:\n\t\tStatement.__init__(self, cursor)\n\n\t\tself.__kind = kind\n\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t\tself.__expression = expression\n\t\tself.__expression.above = self\n\n\t@property\n\tdef assignment_kind(self) -> AssignmentKind:\n\t\treturn self.__kind\n\n\t@property\n\tdef expression(self) -> Node:\n\t\treturn self.__expression\n\n\t@property\n\tdef name(self) -> Node:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitSetStatement(self, argument)\n\n\n# AST node for the (currently internal) 'operator' statement.\nclass OperatorStatement(Statements):\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tcursor : Cursor,\n\t\t\ttype : Node,\n\t\t\toperator_kind : OperatorKind,\n\t\t\tparameters : List[Node],\n\t\t\tbelow : List[Node],\n\t\t\tintrinsic : bool = False\n\t\t) -> None:\n\t\tStatements.__init__(self, cursor, below)\n\n\t\tself.type = type\n\t\tself.type.above = self\n\n\t\tself.__parameters = parameters\n\t\tfor parameter in self.__parameters:\n\t\t\tparameter.above = self\n\n\t\tself.__operator_kind = operator_kind\n\t\tself.__intrinsic = intrinsic\n\n\t@property\n\tdef intrinsic(self) -> bool:\n\t\treturn self.__intrinsic\n\n\t@property\n\tdef operator_kind(self) -> OperatorKind:\n\t\treturn self.__operator_kind\n\n\t@property\n\tdef parameters(self) -> List[Node]:\n\t\treturn self.__parameters\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitOperatorStatement(self, argument)\n\n\n# The different kinds of routines known to the compiler.\nclass RoutineKind(enum.Enum):\n\n\tConstructor = enum.auto()\n\tEntry = enum.auto()\n\tFunction = enum.auto()\n\tMethod = enum.auto()\n\n\nROUTINE_KIND_TO_TOKEN : Dict[RoutineKind, str] = {\n\tRoutineKind.Entry : \"entry\",\n\tRoutineKind.Function : \"function\",\n\tRoutineKind.Method : \"method\",\n}\n\n\n# AST node shared by all routines (functions, methods, entry points, etc.).\nclass Routine(Statements):\n\n\tdef 
__init__(\n\t\t\tself,\n\t\t\tcursor : Cursor,\n\t\t\troutine_kind : RoutineKind,\n\t\t\ttype : Node,\n\t\t\tname : Name,\n\t\t\tparameters : List[Parameter],\n\t\t\tbelow : List[Node],\n\t\t\tintrinsic : bool = False\n\t\t) -> None:\n\t\tStatements.__init__(self, cursor, below)\n\n\t\tself.__routine_kind = routine_kind\n\n\t\t# NOTE: Use the type setter inherited from Node.\n\t\tself.type = type\n\t\tself.type.above = self\n\n\t\tself.__parameters = parameters\n\t\tfor parameter in self.__parameters:\n\t\t\tparameter.above = self\n\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t\tself.__intrinsic = intrinsic\n\n\t@property\n\tdef intrinsic(self) -> bool:\n\t\treturn self.__intrinsic\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\t@property\n\tdef parameters(self) -> List[Parameter]:\n\t\treturn self.__parameters\n\n\t@property\n\tdef routine_kind(self) -> RoutineKind:\n\t\treturn self.__routine_kind\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitRoutine(self, argument)\n\n\n# AST node for a single 'throw' statement.\nclass ThrowStatement(Node):\n\n\tdef __init__(self, cursor : Cursor, expression : Node) -> None:\n\t\tNode.__init__(self, cursor)\n\t\tself.__expression = expression\n\t\tself.__expression.above = self\n\t\tself.__local : Optional[str] = None\n\n\t@property\n\tdef expression(self):\n\t\treturn self.__expression\n\n\t@property\n\tdef local(self) -> str:\n\t\tassert(self.__local is not None)\n\t\treturn self.__local\n\n\t@local.setter\n\tdef local(self, value) -> None:\n\t\tassert(self.__local is None)\n\t\tself.__local = value\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitThrowStatement(self, argument)\n\n\n#*********************************************************** Definitions *********************************************************\n\n# A single definition, be it a type definition or whatever.\nclass Definition(Node):\n\n\tdef __init__(self, cursor : Cursor) -> None:\n\t\tNode.__init__(self, cursor)\n\n\n# A compound definition that includes a number of children, such as a module or a class.\nclass Definitions(Definition):\n\n\tdef __init__(self, cursor : Cursor, below : List[Node]) -> None:\n\t\tDefinition.__init__(self, cursor)\n\t\tself.__below = below\n\t\tfor child in self.__below:\n\t\t\tchild.above = self\n\n\t@property\n\tdef below(self):\n\t\treturn self.__below\n\n\n# AST node for the 'class' definition, which may contain values, guards, and methods.\nclass ClassDefinition(Definitions):\n\n\tdef __init__(self, cursor : Cursor, name : Name, below : List[Node]) -> None:\n\t\tDefinitions.__init__(self, cursor, below)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t@property\n\tdef name(self):\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitClassDefinition(self, argument)\n\n\n# AST node for a single 'field' definition.\nclass FieldDefinition(Definition):\n\n\tdef __init__(self, cursor : Cursor, name : Name, type : Type) -> None:\n\t\tDefinition.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.type = type\n\t\tself.type.above = self\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitFieldDefinition(self, argument)\n\n\nclass DirectionKind(enum.Enum):\n\n\tIn = enum.auto()\n\tIo = enum.auto()\n\tOut = enum.auto()\n\n\n# AST node for the 
'guard' definition, which may contain an optional getter and/or an optional setter.\nclass GuardDefinition(Definitions):\n\n\tdef __init__(self, cursor : Cursor, name : Name, subject : Node, below : List[Node]):\n\t\tDefinitions.__init__(self, cursor, below)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.__subject = subject\n\t\tself.__subject.above = self\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\t@property\n\tdef subject(self) -> Node:\n\t\treturn self.__subject\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitGuardDefinition(self, argument)\n\n\n# AST node for the 'module' definition, which contains all other definitions.\nclass ModuleDefinition(Definitions):\n\n\tdef __init__(self, cursor : Cursor, name : Name, below : List[Node]) -> None:\n\t\tDefinitions.__init__(self, cursor, below)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitModuleDefinition(self, argument)\n\n\n# AST node that represents a single parameter in a list of parameters.\nclass Parameter(Node):\n\n\tdef __init__(self, cursor : Cursor, name : Name, type : Node) -> None:\n\t\tNode.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.type = type\n\t\tself.type.above = self\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitParameter(self, argument)\n\n\n# AST node for the entire program (the parent of all modules).\nclass ProgramDefinition(Definitions):\n\n\tdef __init__(self, cursor : Cursor, below : List[Node]) -> None:\n\t\tDefinitions.__init__(self, cursor, below)\n\t\tself.__symbols = SymbolTable()\n\n\t@property\n\tdef symbols(self) -> SymbolTable:\n\t\treturn self.__symbols\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitProgramDefinition(self, argument)\n\n\n#-------------------------------------------------------- Expression Nodes -------------------------------------------------------\n\n# The base class for expression nodes in the AST.\nclass Expression(Node):\n\n\tdef __init__(self, cursor : Cursor) -> None:\n\t\tNode.__init__(self, cursor)\n\n\n# A simple boolean literal ('false' or 'true').\nclass BooleanLiteral(Expression):\n\n\tdef __init__(self, cursor : Cursor, type : Node, value : bool) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.type = type\n\t\tself.type.above = self\n\t\tself.__value = value\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\tassert(self.type is not None)\n\t\treturn self.type.type_kind\n\n\t@property\n\tdef value(self) -> bool:\n\t\treturn self.__value\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitBooleanLiteral(self, argument)\n\n\n# A call to a function.\nclass CallExpression(Expression):\n\n\tdef __init__(self, cursor : Cursor, name : Name, arguments : List[Node]) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.__arguments = arguments\n\t\tfor argument in self.__arguments:\n\t\t\targument.above = self\n\n\t@property\n\tdef arguments(self) -> List[Node]:\n\t\treturn self.__arguments\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn 
visitor.visitCallExpression(self, argument)\n\n\n# A cardinal literal aka an unsigned literal.\nclass CardinalLiteral(Expression):\n\n\tdef __init__(self, cursor : Cursor, type : Node, value : int) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.type = type\n\t\tself.type.above = self\n\t\tassert(value >= 0)\n\t\tself.__value = value\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\tassert(self.type is not None)\n\t\treturn self.type.type_kind\n\n\t@property\n\tdef value(self) -> int:\n\t\treturn self.__value\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitCardinalLiteral(self, argument)\n\n\n# A constructor and its optional list of initializers: ClassName(a := x, b := y, c := z).\nclass ConstructorExpression(Expression):\n\n\tdef __init__(self, cursor : Cursor, name : Node, values : List[SetStatement]) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\t\tself.__values = values\n\t\tfor value in self.__values:\n\t\t\tvalue.above = self\n\n\t@property\n\tdef name(self):\n\t\treturn self.__name\n\n\t@property\n\tdef values(self) -> List[SetStatement]:\n\t\treturn self.__values\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\tassert(self.type is not None)\n\t\treturn self.type.type_kind\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitConstructorExpression(self, argument)\n\n\n# Implements the 'none' expression, which is identical to a null pointer.\nclass NoneExpression(Expression):\n\n\tdef __init__(self, cursor : Cursor, type : Node) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.type = type\n\t\tself.type.above = self\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitNoneExpression(self, argument)\n\n\n# Implements the 'void' expression, which is identical to no expression at all.\nclass VoidExpression(Expression):\n\n\tdef __init__(self, cursor : Cursor, type : Node) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.type = type\n\t\tself.type.above = self\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitVoidExpression(self, argument)\n\n\n# Class used for all unary expressions in the AST.\nclass UnaryExpression(Expression):\n\n\tdef __init__(self, cursor : Cursor, operator_kind : OperatorKind, first : Node) -> None:\n\t\tExpression.__init__(self, cursor)\n\n\t\tself.__first = first\n\t\tself.__first.above = self\n\t\tself.__operator_kind = operator_kind\n\n\t@property\n\tdef first(self) -> Node:\n\t\treturn self.__first\n\n\t@property\n\tdef operator_kind(self) -> OperatorKind:\n\t\treturn self.__operator_kind\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn self.__first.type_kind\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitUnaryExpression(self, argument)\n\n\n# A simple '!first' expression: An absolute reference.\nclass AbsoluteExpression(UnaryExpression):\n\n\tdef __init__(self, cursor : Cursor, operator_kind : OperatorKind, first : Node) -> None:\n\t\tUnaryExpression.__init__(self, cursor, operator_kind, first)\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitAbsoluteExpression(self, argument)\n\n\n# Class used for all binary expressions in the AST.\nclass BinaryExpression(UnaryExpression):\n\n\tdef __init__(self, cursor : Cursor, kind : OperatorKind, first : Node, other : Node) -> None:\n\t\tUnaryExpression.__init__(self, cursor, kind, 
first)\n\t\tself.__other = other\n\t\tself.__other.above = self\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\tassert(self.type is not None)\n\t\treturn self.type.type_kind\n\n\t@property\n\tdef other(self) -> Node:\n\t\treturn self.__other\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitBinaryExpression(self, argument)\n\n\n# A simple '.first' expression: A class-relative reference.\nclass RelativeExpression(Expression):\n\n\tdef __init__(self, cursor : Cursor, name : Name) -> None:\n\t\tExpression.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\t@property\n\tdef text(self) -> str:\n\t\treturn self.__name.text\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitRelativeExpression(self, argument)\n\n\n# A simple 'first.other' expression.\nclass SelectorExpression(BinaryExpression):\n\n\tdef __init__(self, cursor : Cursor, first : Node, other : Node) -> None:\n\t\tBinaryExpression.__init__(self, cursor, OperatorKind.MemberSelector, first, other)\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitSelectorExpression(self, argument)\n\n\n# Class used for all tertiary expressions in the AST.\nclass TertiaryExpression(BinaryExpression):\n\n\tdef __init__(self, cursor : Cursor, kind : OperatorKind, first : Node, other : Node, third : Node) -> None:\n\t\tBinaryExpression.__init__(self, cursor, kind, first, other)\n\t\tself.__third = third\n\t\tself.__third.above = self\n\n\t@property\n\tdef third(self) -> Node:\n\t\treturn self.__third\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn self.first.type_kind\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitTertiaryExpression(self, argument)\n\n\n# Enumeration that enumerates the possible kinds of names in the AST.\nclass NameKind(enum.Enum):\n\n\tClass = enum.auto()\n\tConstructor = enum.auto()\n\tEntry = enum.auto()\n\tField = enum.auto()\n\tFunction = enum.auto()\n\tGuard = enum.auto()\n\tLocal = enum.auto()\n\tModule = enum.auto()\n\tName = enum.auto()\n\tOperator = enum.auto()\n\tParameter = enum.auto()\n\tReference = enum.auto()\n\tRoutine = enum.auto()\n\tType = enum.auto()\n\n\n# A definition of a name (names are never embedded directly into the AST node, rather they are linked in as a Name node).\nclass Name(Node):\n\n\tdef __init__(self, cursor : Cursor, namekind : NameKind, text : str) -> None:\n\t\tNode.__init__(self, cursor)\n\t\tself.__namekind = namekind\n\t\tself.__text = text\n\n\t@property\n\tdef namekind(self) -> NameKind:\n\t\treturn self.__namekind\n\n\t@namekind.setter\n\tdef namekind(self, value : NameKind) -> None:\n\t\tself.__namekind = value\n\n\t@property\n\tdef text(self) -> str:\n\t\treturn self.__text\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitName(self, argument)\n\n\n# A reference to a symbol name.\nclass Reference(Name):\n\n\tdef __init__(self, cursor : Cursor, text : str) -> None:\n\t\tName.__init__(self, cursor, NameKind.Reference, text)\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitReference(self, argument)\n\n\n#-------------------------------------------------------- Type Nodes -------------------------------------------------------------\n\n# The base class of all AST type nodes.\nclass Type(Node):\n\n\tdef __init__(self, cursor : Cursor) -> 
None:\n\t\tNode.__init__(self, cursor)\n\n\n# The name of a type.\nclass TypeName(Type):\n\n\tdef __init__(self, cursor : Cursor, name : Name) -> None:\n\t\tType.__init__(self, cursor)\n\t\tself.__name = name\n\t\tself.__name.above = self\n\n\t@property\n\tdef name(self) -> Name:\n\t\treturn self.__name\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\tif not self.type:\n\t\t\traise InternalError(\"TypeName.type never assigned\")\n\t\treturn self.type.type_kind\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitTypeName(self, argument)\n\n\n# The type of the 'none' expression.\nclass NoneType(Type):\n\n\tdef __init__(self, cursor : Cursor) -> None:\n\t\tType.__init__(self, cursor)\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn TypeKind.NoneType\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitNoneType(self, argument)\n\n\n# The type of 'return' statements WITHOUT an expression.\nclass VoidType(Type):\n\n\tdef __init__(self, cursor : Cursor) -> None:\n\t\tType.__init__(self, cursor)\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn TypeKind.Void\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitVoidType(self, argument)\n\n\n# Base class for types that incorporate a width (in bits) field.\nclass WidthType(Type):\n\n\tdef __init__(self, cursor : Cursor, width : int) -> None:\n\t\tType.__init__(self, cursor)\n\t\tself.__width = width\n\n\t@property\n\tdef width(self) -> int:\n\t\treturn self.__width\n\n\n# The built-in 'boolean' type.\nclass BooleanType(WidthType):\n\n\tdef __init__(self, cursor : Cursor, width : int) -> None:\n\t\tassert(width == 1 or width % 8 == 0)\n\t\tWidthType.__init__(self, cursor, width)\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn TypeKind.Boolean\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitBooleanType(self, argument)\n\n\n# The built-in 'cardinalN' types.\nclass CardinalType(WidthType):\n\n\tdef __init__(self, cursor : Cursor, width : int) -> None:\n\t\tassert(width % 8 == 0)\n\t\tWidthType.__init__(self, cursor, width)\n\n\t@property\n\tdef type_kind(self) -> TypeKind:\n\t\treturn TypeKind.Cardinal\n\n\tdef visit(self, visitor : Visitor, argument : Any) -> Any:\n\t\treturn visitor.visitCardinalType(self, argument)\n\n\n#******************************************************* Symbol Table Management *************************************************\n\n# Exception class used to report errors discovered by the symbol table (\"Duplicate symbol\", \"Unknown symbol\").\nclass SymbolError(CursorError):\n\n\tdef __init__(self, cursor : Cursor, text : str) -> None:\n\t\tCursorError.__init__(self, cursor, text)\n\n\tdef __str__(self) -> str:\n\t\treturn \"%s Error: %s\" % ( self.cursor, self.text )\n\n\n# The scoped symbol table manager class.\nclass SymbolTable(object):\n\n\tdef __init__(self) -> None:\n\t\tself.__stack : List[Dict[str, Node]] = [{}]\n\n\tdef __insert(self, identifier : str, declaration : Node) -> None:\n\t\tself.__stack[-1][identifier] = declaration\n\n\tdef __locateCurrent(self, name : str) -> Optional[Node]:\n\t\treturn self.__stack[-1].get(name)\n\n\tdef __locateUpwards(self, name : str) -> Optional[Node]:\n\t\tfor scope in reversed(self.__stack):\n\t\t\tfound = scope.get(name)\n\t\t\tif found:\n\t\t\t\treturn found\n\t\treturn None
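\n\n\t# Example usage (a sketch for orientation; 'cursor', 'x_node' and 'f_node' are hypothetical stand-ins):\n\t#\n\t#   table = SymbolTable()\n\t#   table.insert(cursor, \"x\", x_node)    # declare 'x' in the current scope\n\t#   table.enter(cursor, \"f\", f_node)     # declare 'f' and open a new scope for it\n\t#   table.locate(cursor, \"x\")            # resolves 'x' through the enclosing scope\n\t#   table.leave(cursor, \"f\", f_node)     # close the scope opened by enter()\n\n\t# Opens a new scope for the given declaration, declaring it in the enclosing scope if not already present.\n\tdef enter(self, cursor : Cursor, identifier : str, declaration : Node) -> None:\n\t\tif 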
self.__stack[-1].get(identifier) != declaration:\n\t\t\tself.insert(cursor, identifier, declaration)\n\t\tself.__stack.append({})\n\n\t# Closes the scope opened by the matching enter() call.\n\tdef leave(self, cursor : Cursor, identifier : str, declaration : Node) -> None:\n\t\tself.__stack.pop()\n\t\tassert(self.__locateCurrent(identifier) == declaration)\n\n\t# Declares an identifier in the current scope, raising SymbolError on duplicate definitions.\n\tdef insert(self, cursor : Cursor, identifier : str, declaration : Node) -> None:\n\t\tif self.__locateCurrent(identifier):\n\t\t\traise SymbolError(cursor, \"Duplicate definition of symbol: %s\" % identifier)\n\t\tself.__insert(identifier, declaration)\n\n\t# Resolves a name by searching from the innermost scope outwards, raising SymbolError if it is unknown.\n\tdef locate(self, cursor : Cursor, name : str) -> Node:\n\t\tresult = self.__locateUpwards(name)\n\t\tif not result:\n\t\t\traise SymbolError(cursor, \"Unknown symbol: %s\" % name)\n\t\treturn result\n\n\n#------------------------------------------------ The Braceless0 Standard Environment --------------------------------------------\n\n# A symbol table populated with the built-in Braceless run-time environment.\nclass Environment(SymbolTable):\n\n\tdef __init__(self) -> None:\n\t\tSymbolTable.__init__(self)\n\n\t\tcursor = Cursor(\"Environment\")\n\t\twidths = [ 1, 8, 16, 32, 64 ]\n\n\t\t# Register: operator not(first is TYPE) is TYPE\n\t\ttype1 : Optional[Node] = None\n\t\treturn_type : Optional[Node] = None\n\t\tfor width in widths:\n\t\t\tif width == 1:\n\t\t\t\ttype1 = BooleanType(cursor, width)\n\t\t\t\treturn_type = BooleanType(cursor, width)\n\t\t\telse:\n\t\t\t\ttype1 = CardinalType(cursor, width)\n\t\t\t\treturn_type = CardinalType(cursor, width)\n\t\t\tparameter1 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"first\"), type1)\n\t\t\toperator = OperatorStatement(cursor, return_type, OPERATOR_KINDS[\"not\"], [ parameter1 ], [], True)\n\t\t\tself.insert(cursor, self.mangle(operator), operator)\n\n\t\t# Register: operator LOGICAL(first is TYPE, other is TYPE) is TYPE\n\t\ttype2 : Optional[Node] = None\n\t\tfor label in [ \"and\", \"or\", \"xor\" ]:\n\t\t\tfor width in widths:\n\t\t\t\tif width == 1:\n\t\t\t\t\ttype1 = BooleanType(cursor, width)\n\t\t\t\t\ttype2 = BooleanType(cursor, width)\n\t\t\t\t\treturn_type = BooleanType(cursor, width)\n\t\t\t\telse:\n\t\t\t\t\ttype1 = CardinalType(cursor, width)\n\t\t\t\t\ttype2 = CardinalType(cursor, width)\n\t\t\t\t\treturn_type = CardinalType(cursor, width)\n\t\t\t\tparameter1 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"first\"), type1)\n\t\t\t\tparameter2 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"other\"), type2)\n\t\t\t\toperator = OperatorStatement(cursor, return_type, OPERATOR_KINDS[label], [ parameter1, parameter2 ], [], True)\n\t\t\t\tself.insert(cursor, self.mangle(operator), operator)\n\n\t\t# Register: operator ALGEBRAIC(first is TYPE, other is TYPE) is TYPE\n\t\tfor label in [ \"+\", \"-\", \"*\", \"/\", \"%\" ]:\n\t\t\tfor width in widths:\n\t\t\t\t# Boolean algebraic operations do not make any sense so avoid creating operators for them.\n\t\t\t\tif width == 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\ttype1 = CardinalType(cursor, width)\n\t\t\t\ttype2 = CardinalType(cursor, width)\n\t\t\t\treturn_type = CardinalType(cursor, width)\n\t\t\t\tparameter1 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"first\"), type1)\n\t\t\t\tparameter2 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"other\"), type2)\n\t\t\t\toperator = OperatorStatement(cursor, return_type, OPERATOR_KINDS[label], [ parameter1, parameter2 ], [], True)\n\t\t\t\tself.insert(cursor, self.mangle(operator), operator)\n\n\t\t# Register: operator RELATIONAL(first is TYPE, other is 
TYPE) is boolean\n\t\tfor label in [ \"<\", \"<=\", \">\", \">=\", \"=\", \"<>\" ]:\n\t\t\tfor width in widths:\n\t\t\t\tif width == 1:\n\t\t\t\t\ttype1 = BooleanType(cursor, width)\n\t\t\t\t\ttype2 = BooleanType(cursor, width)\n\t\t\t\telse:\n\t\t\t\t\ttype1 = CardinalType(cursor, width)\n\t\t\t\t\ttype2 = CardinalType(cursor, width)\n\t\t\t\treturn_type = BooleanType(cursor, width)\n\t\t\t\tparameter1 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"first\"), type1)\n\t\t\t\tparameter2 = Parameter(cursor, Name(cursor, NameKind.Parameter, \"other\"), type2)\n\t\t\t\toperator = OperatorStatement(cursor, return_type, OPERATOR_KINDS[label], [ parameter1, parameter2 ], [], True)\n\t\t\t\tself.insert(cursor, self.mangle(operator), operator)\n\n\tdef insert(self, cursor : Cursor, identifier : str, declaration : Node) -> None:\n\t\tSymbolTable.insert(self, cursor, identifier, declaration)\n\n\tdef mangle(self, declaration : Node) -> str:\n\t\t# Let the mangler compute the appropriate mangled name.\n\t\tmangler = Mangler()\n\t\tmangler.visit(declaration)\n\t\treturn mangler.result\n\n\n#************************************************** Abstract Syntax Tree Visitor *************************************************\n\n# Interface for visitors that walk the abstract syntax tree.\nclass Visitor(object):\n\n\t# Convenient short-hand that allows visiting any node without specifying its type.\n\tdef visit(self, that : Node, argument : Any = None) -> Any:\n\t\treturn that.visit(self, argument)\n\n\tdef visitAbsoluteExpression(self, that : AbsoluteExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitCallExpression(self, that : CallExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitCallStatement(self, that : CallStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitConstructorExpression(self, that : ConstructorExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitCreateStatement(self, that : CreateStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitElifStatement(self, that : ElifStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitElseStatement(self, that : ElseStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitGuardDefinition(self, that : GuardDefinition, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitIfElifElseStatement(self, that : IfElifElseStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method 
called\")\n\n\tdef visitIfStatement(self, that : IfStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitSetStatement(self, that : SetStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitLetStatement(self, that : LetStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitModuleDefinition(self, that : ModuleDefinition, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitName(self, that : Name, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitNoneExpression(self, that : NoneExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitNoneType(self, that : NoneType, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitParameter(self, that : Parameter, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitReference(self, that : Reference, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitRelativeExpression(self, that : RelativeExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitReturnStatement(self, that : ReturnStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitSelectorExpression(self, that : SelectorExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitThrowStatement(self, that : ThrowStatement, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitTypeName(self, that : TypeName, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitFieldDefinition(self, that : FieldDefinition, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitVoidExpression(self, that : VoidExpression, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> Any:\n\t\traise InternalError(\"Abstract method called\")\n\n\n# A custom visitor that descends the AST and calls the '_visit()' method on each node in the tree.\nclass Sweeper(Visitor):\n\n\tdef _visit(self, that : Node, argument : Any) -> Any:\n\t\traise InternalError(\"Sweeper._visit() has not been overloaded\")\n\n\tdef _visitRoutine(self, that : Routine, argument : Any) -> Any:\n\t\tthat.name.visit(self, argument)\n\t\tfor parameter in that.parameters:\n\t\t\tparameter.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitAbsoluteExpression(self, that : 
AbsoluteExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.first.visit(self, argument)\n\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitCallExpression(self, that : CallExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tfor argument1 in that.arguments:\n\t\t\targument1.visit(self, argument)\n\n\tdef visitCallStatement(self, that : CallStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tfor argument1 in that.arguments:\n\t\t\targument1.visit(self, argument)\n\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitConstructorExpression(self, that : ConstructorExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tfor value in that.values:\n\t\t\tvalue.visit(self, argument)\n\n\tdef visitCreateStatement(self, that : CreateStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitElifStatement(self, that : ElifStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.condition.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitElseStatement(self, that : ElseStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitFieldDefinition(self, that : FieldDefinition, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitGuardDefinition(self, that : GuardDefinition, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tthat.subject.visit(self, argument)\n\n\tdef visitIfStatement(self, that : IfStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.condition.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitIfElifElseStatement(self, that : IfElifElseStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitLetStatement(self, that : LetStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitModuleDefinition(self, that : 
ModuleDefinition, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitName(self, that : Name, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitNoneExpression(self, that : NoneExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitNoneType(self, that : NoneType, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitParameter(self, that : Parameter, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitReference(self, that : Reference, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitRelativeExpression(self, that : RelativeExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\n\tdef visitReturnStatement(self, that : ReturnStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tself._visitRoutine(that, argument)\n\n\tdef visitSelectorExpression(self, that : SelectorExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\n\tdef visitSetStatement(self, that : SetStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\t\tthat.third.visit(self, argument)\n\n\tdef visitThrowStatement(self, that : ThrowStatement, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitTypeName(self, that : TypeName, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.name.visit(self, argument)\n\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\t\tthat.first.visit(self, argument)\n\n\tdef visitVoidExpression(self, that : VoidExpression, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> Any:\n\t\tself._visit(that, argument)\n\n\n# Assign a small unique positive integer ID to each node in the abstract syntax tree (for dumping purposes).\nclass Enumerator(Sweeper):\n\n\tdef __init__(self) -> None:\n\t\tSweeper.__init__(self)\n\t\tself.__index = 1\n\n\tdef _visit(self, that : Node, argument : Any) -> Any:\n\t\tthat.id = self.__index\n\t\tself.__index += 1\n\n\n# Check that all nodes, except the ProgramDefinition node, have a valid parent.\nclass OrphanChecker(Sweeper):\n\n\tdef __init__(self) -> None:\n\t\tSweeper.__init__(self)\n\n\tdef _visit(self, that : Node, argument : Any) -> Any:\n\t\tif not isinstance(that, ProgramDefinition) and that.above is None:\n\t\t\traise InternalError(\"Node %u has no parent\" % that.id)\n\n\n#************************************************* Abstract Syntax Tree Dumper ***************************************************\n\n# Dumps the abstract 
syntax tree in a human-readable format.\nclass Dumper(Sweeper):\n\n\tdef _visit(self, that : Any, stream : TextWriter) -> None:\n\t\tabove = (\" (above=%04u)\" % that.above.id) if that.above else \"\"\n\t\tstream.flush(\"%04u %s%s %s:\" % ( that.id, that.__class__.__name__, above, that.cursor ))\n\t\tdel above\n\n\t\tstream.indent()\n\n\t\t# Below follows a load of brutish code, but I am too lazy to write a full visitor just to dump the AST.\n\t\ttry:\n\t\t\tstream.flush(\"assignment_kind = %s\" % that.assignment_kind)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"arguments = [ %s ]\" % ', '.join(map(str, that.arguments)))\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tbelow = that.below\n\t\t\tif len(below) <= 4:\n\t\t\t\tstream.flush(\"below = [ %s ]\" % ', '.join(map(str, that.below)))\n\t\t\telse:\n\t\t\t\tstream.flush(\"below = [\")\n\t\t\t\tstream.indent()\n\t\t\t\tindex = 0\n\t\t\t\twhile index < len(below):\n\t\t\t\t\tstream.write(\"%s, \" % str(below[index]))\n\t\t\t\t\tindex += 1\n\t\t\t\t\tif index % 4 == 0 or index == len(below):\n\t\t\t\t\t\tstream.flush(\"\")\n\t\t\t\tstream.dedent(\"]\")\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"condition = %s\" % that.condition)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"expression = %s\" % that.expression)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"first = %s\" % that.first)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"intrinsic = %s\" % that.intrinsic)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"mangled = %s\" % that.mangled)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"name = %s\" % that.name)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"operator = %s\" % that.operator)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"operator_kind = %s\" % that.operator_kind)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"other = %s\" % that.other)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"parameter = %s\" % that.parameter)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"parameters = [ %s ]\" % ', '.join(map(str, that.parameters)))\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"routine_kind = %s\" % that.routine_kind)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"text = '%s'\" % that.text)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"third = %s\" % that.third)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\tif that.type:\n\t\t\tstream.flush(\"type = %s\" % that.type)\n\n\t\ttry:\n\t\t\tstream.flush(\"type_kind = %s\" % that.type_kind)\n\t\texcept InternalError:\n\t\t\tpass\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"value = %s\" % that.value)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tstream.flush(\"width = %u\" % that.width)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\tstream.dedent()\n\n\n#************************************************** Abstract Syntax Tree Printer *************************************************\n\n# Formats the abstract syntax tree, or a part of it, as Braceless0 source code for use in the LlvmWriter comments.\nclass Printer(Visitor):\n\n\t# Convenient short-hand that allows visiting any node without specifying its type.\n\tdef visit(self, that : 
Node, argument : Any = None) -> str:\n\t\treturn that.visit(self, argument)\n\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += that.first.visit(self, argument)\n\t\tresult += SPC\n\t\tresult += OPERATOR_KIND_TO_TOKEN[that.operator_kind]\n\t\tresult += SPC\n\t\tresult += that.other.visit(self, argument)\n\t\treturn result\n\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> str:\n\t\treturn str(that.value).lower()\n\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> str:\n\t\treturn 'boolean'\n\n\tdef visitCallExpression(self, that : CallExpression, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += that.name.visit(self, argument)\n\t\tresult += \"(\"\n\t\tfor argument1 in that.arguments:\n\t\t\tif argument1 != that.arguments[0]:\n\t\t\t\tresult += \", \"\n\t\t\tresult += argument1.visit(self, argument)\n\t\tresult += \")\"\n\t\treturn result\n\n\tdef visitCallStatement(self, that : CallStatement, argument : Any) -> str:\n\t\tresult = \"call \"\n\t\tresult += that.name.visit(self, argument)\n\t\tresult += \"(\"\n\t\tfor argument1 in that.arguments:\n\t\t\tif argument1 != that.arguments[0]:\n\t\t\t\tresult += \", \"\n\t\t\tresult += argument1.visit(self, argument)\n\t\tresult += \")\"\n\t\treturn result\n\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> str:\n\t\treturn str(that.value)\n\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> str:\n\t\treturn \"cardinal\" + str(that.width // 8)\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> Any:\n\t\treturn \"class %s:\" % that.name.text\n\n\tdef visitCreateStatement(self, that : CreateStatement, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"create \"\n\t\tresult += that.name.visit(self, argument)\n\t\tresult += \" is \"\n\t\tassert(that.type is not None)\n\t\tresult += that.type.visit(self, argument)\n\t\tresult += \" := \"\n\t\tresult += that.expression.visit(self, argument)\n\t\treturn result\n\n\tdef visitElifStatement(self, that : ElifStatement, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"elif \"\n\t\tresult += that.condition.visit(self, argument)\n\t\tresult += \":\"\n\t\treturn result\n\n\tdef visitElseStatement(self, that : ElseStatement, argument : Any) -> str:\n\t\treturn \"else:\"\n\n\tdef visitIfElifElseStatement(self, that : IfElifElseStatement, argument : Any) -> str:\n\t\traise InternalError(\"Unsupported\")\n\n\tdef visitIfStatement(self, that : IfStatement, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"if \"\n\t\tresult += that.condition.visit(self, argument)\n\t\tresult += \":\"\n\t\treturn result\n\n\tdef visitLetStatement(self, that : LetStatement, argument : Any) -> str:\n\t\tresult = \"let \"\n\t\tresult += that.name.visit(self, argument)\n\t\tresult += \" \"\n\t\tresult += ASSIGNMENT_KIND_TO_TOKEN[that.assignment_kind]\n\t\tresult += \" \"\n\t\tresult += that.expression.visit(self, argument)\n\t\treturn result\n\n\tdef visitModuleDefinition(self, that : ModuleDefinition, argument : Any) -> str:\n\t\traise InternalError(\"Unsupported\")\n\n\tdef visitName(self, that : Name, argument : Any) -> str:\n\t\treturn that.text\n\n\tdef visitNoneExpression(self, that : NoneExpression, argument : Any) -> str:\n\t\treturn \"none\"\n\n\tdef visitNoneType(self, that : NoneType, argument : Any) -> str:\n\t\traise InternalError(\"Should never be called (handled higher up the tree)\")\n\n\tdef 
visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"operator \"\n\t\tresult += OPERATOR_KIND_TO_TOKEN[that.operator_kind]\n\t\tresult += \"(\"\n\t\tfor argument1 in that.parameters:\n\t\t\tif argument1 != that.parameters[0]:\n\t\t\t\tresult += \", \"\n\t\t\tresult += argument1.visit(self, argument)\n\t\tresult += \") is \"\n\t\tassert(that.type is not None)\n\t\tresult += that.type.visit(self, argument)\n\t\treturn result\n\n\tdef visitParameter(self, that : Parameter, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += that.name.visit(self, argument)\n\t\tresult += \" is \"\n\t\tassert(that.type is not None)\n\t\tresult += that.type.visit(self, argument)\n\t\treturn result\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> str:\n\t\traise InternalError(\"Unsupported\")\n\n\tdef visitReference(self, that : Reference, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += that.text\n\t\treturn result\n\n\tdef visitReturnStatement(self, that : ReturnStatement, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"return\"\n\t\tif that.type_kind is not TypeKind.Void:\n\t\t\tresult += \" \"\n\t\t\tresult += that.expression.visit(self, argument)\n\t\treturn result\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += ROUTINE_KIND_TO_TOKEN[that.routine_kind]\n\t\tresult += \" \"\n\t\tresult += that.name.visit(self, argument)\n\n\t\tresult += \"(\"\n\t\tfor parameter in that.parameters:\n\t\t\tif parameter != that.parameters[0]:\n\t\t\t\tresult += \", \"\n\t\t\tresult += parameter.visit(self, argument)\n\t\tresult += \")\"\n\n\t\tif that.type_kind != TypeKind.Void:\n\t\t\tresult += \" is \"\n\t\t\tassert(that.type is not None)\n\t\t\tresult += that.type.visit(self, argument)\n\n\t\t# NOTE: Don't output the body as we're using this for comment lines in the generated LLVM IR.\n\t\treturn result\n\n\tdef visitSelectorExpression(self, that : SelectorExpression, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += that.first.visit(self, argument)\n\t\tresult += \".\"\n\t\tresult += that.other.visit(self, argument)\n\t\treturn result\n\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"if \"\n\t\tresult += that.first.visit(self, argument)\n\t\tresult += \" then \"\n\t\tresult += that.other.visit(self, argument)\n\t\tresult += \" else \"\n\t\tresult += that.third.visit(self, argument)\n\t\treturn result\n\n\tdef visitThrowStatement(self, that : ThrowStatement, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += \"throw \"\n\t\tresult += that.expression.visit(self, argument)\n\t\treturn result\n\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> str:\n\t\tresult = \"\"\n\t\tresult += OPERATOR_KIND_TO_TOKEN[that.operator_kind]\n\t\tresult += \" \"\n\t\tresult += that.first.visit(self, argument)\n\t\treturn result\n\n\tdef visitVoidExpression(self, that : VoidExpression, argument : Any) -> str:\n\t\traise InternalError(\"Should never be called (handled higher up the tree)\")\n\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> str:\n\t\traise InternalError(\"Should never be called (handled higher up the tree)\")\n\n\n#************************************************************** Parser ***********************************************************\n\n# Maps additive token kinds into operator kinds for quick recognition of 
these.\nOPERATOR_ADDITIVE = {\n\tTokenKind.Operator_Minus : OperatorKind.Algebraic_Subtraction,\n\tTokenKind.Operator_Plus : OperatorKind.Algebraic_Addition,\n}\n\n\n# Maps multiplicative token kinds to operator kinds for quick recognition of these.\nOPERATOR_MULTIPLICATIVE = {\n\tTokenKind.Operator_Asterisk : OperatorKind.Algebraic_Multiplication,\n\tTokenKind.Operator_Percent : OperatorKind.Algebraic_Remainder,\n\tTokenKind.Operator_Slash : OperatorKind.Algebraic_Division,\n}\n\n\n# Maps logical token kinds to operator kinds for quick recognition of these.\nOPERATOR_LOGICAL = {\n\tTokenKind.Keyword_And : OperatorKind.Logical_And,\n\tTokenKind.Keyword_Or : OperatorKind.Logical_Or,\n\tTokenKind.Keyword_Xor : OperatorKind.Logical_Xor,\n}\n\n\n# Maps relational token kinds to operator kinds for quick recognition of these.\nOPERATOR_RELATIONAL = {\n\tTokenKind.Operator_Difference : OperatorKind.Relational_Difference,\n\tTokenKind.Operator_Equal : OperatorKind.Relational_Equality,\n\tTokenKind.Operator_GreaterEqual : OperatorKind.Relational_GreaterEqual,\n\tTokenKind.Operator_GreaterThan : OperatorKind.Relational_GreaterThan,\n\tTokenKind.Operator_LessEqual : OperatorKind.Relational_LessEqual,\n\tTokenKind.Operator_LessThan : OperatorKind.Relational_LessThan,\n}\n\n\n# Exception class used to report errors discovered while parsing.\nclass ParserError(LexerError):\n\n\tdef __init__(self, cursor : Cursor, text : str) -> None:\n\t\tLexerError.__init__(self, cursor, text)\n\n\n# The complete Braceless0 recursive descent parser.\nclass Parser(object):\n\n\tdef __init__(self) -> None:\n\t\tself.__offset = 0\n\t\tself.__tokens : Optional[List[Token]] = None\n\n\t\t# Initialize class-wide constants.\n\t\tself.__assignments = {\n\t\t\tTokenKind.Operator_AsteriskEqual : AssignmentKind.Multiplication,\n\t\t\tTokenKind.Operator_ColonEqual : AssignmentKind.Identity,\n\t\t\tTokenKind.Operator_MinusEqual : AssignmentKind.Subtraction,\n\t\t\tTokenKind.Operator_PercentEqual : AssignmentKind.Remainder,\n\t\t\tTokenKind.Operator_PlusEqual : AssignmentKind.Addition,\n\t\t\tTokenKind.Operator_QuestionEqual : AssignmentKind.Optional,\n\t\t\tTokenKind.Operator_SlashEqual : AssignmentKind.Division,\n\t\t}\n\n\t# Fetches, without advancing the token index pointer, the token at the specified offset (0 = current token).\n\tdef fetch(self, offset : int = 0) -> Token:\n\n\t\t# Allow arbitrary lookahead (LL(k)).\n\t\toffset += self.__offset\n\n\t\t# If the lookahead passes the end of the token stream, simply return the last token in it.\n\t\tassert(self.__tokens is not None)\n\t\tif offset >= len(self.__tokens):\n\t\t\toffset = -1\n\n\t\t# Return the requested token.\n\t\treturn self.__tokens[offset]\n\n\t# Matches the current token against the specified token kind and advances past it on success.\n\tdef match(self, kind : TokenKind) -> Token:\n\t\tassert(self.__tokens is not None)\n\t\tresult = self.__tokens[self.__offset]\n\t\tif result.kind != kind:\n\t\t\traise ParserError(result.cursor, \"Token mismatch: Expected %s, found %s\" % ( kind, result.kind ))\n\n\t\tself.__offset += 1\n\n\t\treturn result
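\n\n\t# Typical usage (sketch; 'example.b0' is a hypothetical input file):\n\t#\n\t#   module = Parser().parse(\"example.b0\")\n\t#\n\t# The returned ModuleDefinition can then be handed to any of the Visitor passes defined above.\n\n\t# Parse an entire source module.\n\tdef parse(self, file : str) -> ModuleDefinition:\n\t\t# Read the file into memory and convert it from UTF-8 to Unicode (UTF-32).\n\t\ttext = open(file, 'rb').read().decode('utf8')\n\n\t\t# Convert the file's text content into tokens.\n\t\tself.__offset = 0\n\t\tself.__tokens = Lexer().scan(file, text)\n\n\t\tif False:\n\t\t\tfor token in 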
self.__tokens:\n\t\t\t\tprint(token)\n\n\t\treturn self.parseModuleDefinition()\n\n\t# Parses an argument list including the beginning and ending parentheses.\n\tdef parseArguments(self) -> List[Node]:\n\t\tself.match(TokenKind.ParenthesisBegin)\n\t\targuments = []\n\t\twhile self.fetch().kind != TokenKind.ParenthesisClose:\n\t\t\targuments.append(self.parseExpression())\n\n\t\t\tif self.fetch().kind != TokenKind.ParenthesisClose:\n\t\t\t\tself.match(TokenKind.Comma)\n\t\t\t\tself.match(TokenKind.Space)\n\n\t\t\t\tif self.fetch().kind == TokenKind.ParenthesisClose:\n\t\t\t\t\traise ParserError(self.fetch().cursor, \"Trailing garbage in function call\")\n\n\t\tself.match(TokenKind.ParenthesisClose)\n\t\treturn arguments\n\n\t# Parses at least the specified number of blank lines.\n\tdef parseBlank(self, count : int) -> None:\n\t\tindex = 0\n\t\twhile self.fetch().kind == TokenKind.EndOfLine:\n\t\t\tself.match(TokenKind.EndOfLine)\n\t\t\tindex += 1\n\n\t\t\tif False and index > count:\n\t\t\t\traise ParserError(self.fetch().cursor, \"More than %s empty lines in sequence\" % count)\n\n\t\tif self.fetch().kind != TokenKind.EndOfFile and index < count:\n\t\t\traise ParserError(self.fetch().cursor, \"Blank line expected\")\n\n\t# Parses a 'call' statement.\n\tdef parseCallStatement(self) -> CallStatement:\n\t\tstart = self.match(TokenKind.Keyword_Call)\n\t\tself.match(TokenKind.Space)\n\t\t# TODO: The name of the called routine CAN be a complex expression, not just a simple name...\n\t\tname = self.parseName(NameKind.Routine)\n\t\targuments = self.parseArguments()\n\t\treturn CallStatement(start.cursor, name, arguments)\n\n\t# Parses a 'class' definition.\n\tdef parseClassDefinition(self) -> ClassDefinition:\n\t\t# Parse and discard class comment, if any.\n\t\tself.parseComments()\n\n\t\tstart = self.match(TokenKind.Keyword_Class)\n\t\tself.match(TokenKind.Space)\n\t\tname = self.parseName(NameKind.Class)\n\t\tself.match(TokenKind.Colon)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\t# Parse blank line at the beginning of new group.\n\t\tself.parseBlank(1)\n\t\tself.match(TokenKind.Indent)\n\n\t\tactions = {\n\t\t\tTokenKind.Keyword_Create : self.parseConstructorDefinition,\n\t\t\tTokenKind.Keyword_Field : self.parseFieldDefinition,\n\t\t\tTokenKind.Keyword_Guard : self.parseGuardDefinition,\n\t\t}\n\n\t\t# Parse all definitions in the class.\n\t\tbelow = []\n\t\twhile True:\n\t\t\tself.parseComments()\n\n\t\t\tother = self.fetch()\n\t\t\tif other.kind not in actions:\n\t\t\t\tbreak\n\n\t\t\tbelow.append(actions[other.kind]())\n\t\t\tother = self.fetch()\n\n\t\t\t# Parse and discard trailing empty lines. 
TODO: Handle dedenting.\n\t\t\tif other.kind == TokenKind.EndOfLine:\n\t\t\t\tself.parseBlank(1)\n\n\t\t# Check that there is at least one statement in the block that we've just parsed.\n\t\tif not below:\n\t\t\traise ParserError(start.cursor, \"Expected grouping statement\")\n\n\t\tself.match(TokenKind.Dedent)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\treturn ClassDefinition(start.cursor, name, below)\n\n\t# Parses a constructor definition.\n\tdef parseConstructorDefinition(self) -> Routine:\n\t\tstart = self.match(TokenKind.Keyword_Create)\n\t\tself.match(TokenKind.ParenthesisBegin)\n\t\tself.match(TokenKind.ParenthesisClose)\n\t\ttype_position = self.match(TokenKind.Colon).cursor\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\tself.match(TokenKind.Indent)\n\t\tbelow = self.parseRoutineBody()\n\t\tself.match(TokenKind.Dedent)\n\n\t\tname = Name(start.cursor, NameKind.Constructor, \"create\")\n\t\ttype = VoidType(type_position)\n\n\t\treturn Routine(start.cursor, RoutineKind.Constructor, type, name, [], below, False)\n\n\t# Parses a 'create' statement.\n\tdef parseCreateStatement(self) -> CreateStatement:\n\t\tstart = self.match(TokenKind.Keyword_Create)\n\t\tself.match(TokenKind.Space)\n\t\tlabel = self.parseName(NameKind.Local)\n\t\ttype = self.parseTypeDesignator()\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Operator_ColonEqual)\n\t\tself.match(TokenKind.Space)\n\t\texpression = self.parseExpression()\n\t\tself.match(TokenKind.EndOfLine)\n\t\treturn CreateStatement(start.cursor, label, type, expression)\n\n\t# Parses a leading and optional sequence of comments.\n\tdef parseComments(self) -> None:\n\t\twhile self.fetch().kind == TokenKind.Comment:\n\t\t\tself.match(TokenKind.Comment)\n\t\t\tself.match(TokenKind.EndOfLine)\n\n\t# Parses an arbitrary logical condition while enforcing certain limitations upon its complexity.\n\tdef parseCondition(self) -> Node:\n\t\treturn self.parseConditionIf()\n\n\t# Parses an 'and' condition.\n\tdef parseConditionAnd(self) -> Node:\n\t\tresult = self.parseConditionNot()\n\n\t\tif self.fetch().kind == TokenKind.Space and self.fetch(1).kind == TokenKind.Keyword_And:\n\t\t\tself.match(TokenKind.Space)\n\t\t\toperator = self.match(TokenKind.Keyword_And)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tsecond = self.parseConditionNot()\n\t\t\tresult = BinaryExpression(operator.cursor, OperatorKind.Logical_And, result, second)\n\n\t\treturn result\n\n\t# Parses an optional tertiary operator (if x then y else z).\n\tdef parseConditionIf(self) -> Node:\n\t\tif self.fetch().kind != TokenKind.Keyword_If:\n\t\t\treturn self.parseConditionOr()\n\n\t\tif_token = self.match(TokenKind.Keyword_If)\n\t\tself.match(TokenKind.Space)\n\t\tcondition = self.parseConditionOr()\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Keyword_Then)\n\t\tself.match(TokenKind.Space)\n\t\tfirst = self.parseConditionOr()\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Keyword_Else)\n\t\tself.match(TokenKind.Space)\n\t\tother = self.parseConditionOr()\n\n\t\tresult = TertiaryExpression(if_token.cursor, OperatorKind.Conditional_IfThenElse, condition, first, other)\n\t\treturn result\n\n\t# Parses a 'not' condition.\n\tdef parseConditionNot(self) -> Node:\n\t\t# Allow at most one 'not' in the sub-expression (no need for more and it reduces readability).\n\t\tnegation = None\n\t\tif self.fetch().kind == TokenKind.Keyword_Not:\n\t\t\tnegation = self.match(TokenKind.Keyword_Not)\n\t\t\tself.match(TokenKind.Space)\n\n\t\tfirst = 
self.parseExpressionRelational()\n\n\t\tif negation:\n\t\t\tfirst = UnaryExpression(negation.cursor, OperatorKind.Logical_Not, first)\n\n\t\treturn first\n\n\t# Parses an 'or' or 'xor' condition.\n\tdef parseConditionOr(self) -> Node:\n\t\tresult = self.parseConditionAnd()\n\n\t\tif self.fetch().kind == TokenKind.Space and self.fetch(1).kind in [ TokenKind.Keyword_Or, TokenKind.Keyword_Xor ]:\n\t\t\tself.match(TokenKind.Space)\n\t\t\toperator = self.match(self.fetch().kind)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tsecond = self.parseConditionAnd()\n\t\t\tresult = BinaryExpression(operator.cursor, OPERATOR_LOGICAL[operator.kind], result, second)\n\n\t\treturn result\n\n\t# Parses a direction (in, io, or out).\n\tdef parseDirection(self) -> DirectionKind:\n\t\tdirections = {\n\t\t\tTokenKind.Keyword_In : DirectionKind.In,\n\t\t\tTokenKind.Keyword_Io : DirectionKind.Io,\n\t\t\tTokenKind.Keyword_Out : DirectionKind.Out\n\t\t}\n\t\tstart = self.fetch()\n\t\tdirection = directions.get(start.kind)\n\t\tif not direction:\n\t\t\traise ParserError(start.cursor, \"Expected direction ('in', 'io', or 'out')\")\n\t\tself.match(start.kind)\n\n\t\treturn direction\n\n\t# Parses an 'entry' definition.\n\tdef parseEntryDefinition(self) -> Node:\n\t\tstart = self.match(TokenKind.Keyword_Entry)\n\t\tself.match(TokenKind.Space)\n\t\tlabel = self.parseName(NameKind.Entry)\n\n\t\tparameters : List[Parameter] = []\n\t\tself.match(TokenKind.ParenthesisBegin)\n\t\tself.match(TokenKind.ParenthesisClose)\n\n\t\ttype = self.parseRoutineType()\n\t\tself.match(TokenKind.Colon)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\tself.match(TokenKind.Indent)\n\t\tbelow = self.parseRoutineBody()\n\t\tself.match(TokenKind.Dedent)\n\n\t\treturn Routine(start.cursor, RoutineKind.Entry, type, label, parameters, below)\n\n\tdef parseExpression(self) -> Node:\n\t\treturn self.parseCondition()\n\n\t# Parses an optional additive expression and returns an AST subtree for the expression.\n\tdef parseExpressionAdditive(self) -> Node:\n\t\tfirst = self.parseExpressionMultiplicative()\n\n\t\tif self.fetch().kind == TokenKind.Space and self.fetch(1).kind in OPERATOR_ADDITIVE:\n\t\t\t# Parse algebraic expression such as \"x + y\".\n\t\t\tself.match(TokenKind.Space)\n\t\t\toperator = self.match(self.fetch().kind)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tother = self.parseExpressionMultiplicative()\n\n\t\t\tfirst = BinaryExpression(operator.cursor, OPERATOR_ADDITIVE[operator.kind], first, other)\n\n\t\treturn first\n\n\t# Parses an optional multiplicative expression and returns an AST subtree for the expression.\n\tdef parseExpressionMultiplicative(self) -> Node:\n\t\tfirst = self.parseExpressionSuffix()\n\n\t\tif self.fetch().kind == TokenKind.Space and self.fetch(1).kind in OPERATOR_MULTIPLICATIVE:\n\t\t\t# Parse a multiplicative expression such as \"x * y\".\n\t\t\tself.match(TokenKind.Space)\n\t\t\toperator = self.match(self.fetch().kind)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tother = self.parseExpressionSuffix()\n\n\t\t\tfirst = BinaryExpression(operator.cursor, OPERATOR_MULTIPLICATIVE[operator.kind], first, other)\n\n\t\treturn first\n\n\t# Parses a comparison expression and returns an AST subtree representing that expression.\n\tdef parseExpressionRelational(self) -> Node:\n\t\tfirst = self.parseExpressionAdditive()\n\n\t\tif self.fetch().kind == TokenKind.Space and self.fetch(1).kind in OPERATOR_RELATIONAL:\n\t\t\t# Parse relational expression such as \"x > y\".\n\t\t\tself.match(TokenKind.Space)\n\t\t\toperator = 
self.match(self.fetch().kind)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tother = self.parseExpressionAdditive()\n\n\t\t\tfirst = BinaryExpression(operator.cursor, OPERATOR_RELATIONAL[operator.kind], first, other)\n\n\t\treturn first\n\n\t# Parses a simple expression and returns an AST subtree representing that expression.\n\tdef parseExpressionSimple(self) -> Node:\n\t\tlookahead = self.fetch()\n\n\t\tresult : Optional[Node] = None\n\t\tif lookahead.kind == TokenKind.Name:\n\t\t\t# An identifier.\n\t\t\ttoken = self.match(TokenKind.Name)\n\t\t\tresult = Reference(token.cursor, token.text)\n\t\telif lookahead.kind == TokenKind.Dot:\n\t\t\t# A reference to a field or guard.\n\t\t\ttoken = self.match(TokenKind.Dot)\n\t\t\tfirst = self.parseName(NameKind.Field)\n\t\t\tresult = RelativeExpression(token.cursor, first)\n\t\telif lookahead.kind == TokenKind.Literal_Cardinal:\n\t\t\t# An unsigned literal.\n\t\t\ttoken = self.match(TokenKind.Literal_Cardinal)\n\t\t\tresult = CardinalLiteral(token.cursor, CardinalType(token.cursor, 64), int(token.text))\n\t\telif lookahead.kind in [ TokenKind.Keyword_False, TokenKind.Keyword_True ]:\n\t\t\t# A boolean literal.\n\t\t\ttoken = self.match(lookahead.kind)\n\t\t\tvalue = { TokenKind.Keyword_False : False, TokenKind.Keyword_True : True }[token.kind]\n\t\t\tresult = BooleanLiteral(lookahead.cursor, BooleanType(token.cursor, BITS_PER_BOOLEAN), value)\n\t\telif lookahead.kind == TokenKind.Keyword_New:\n\t\t\ttoken = self.match(lookahead.kind)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tname = self.parseName(NameKind.Type)\n\t\t\tvalues = self.parseSetStatements()\n\t\t\tresult = ConstructorExpression(token.cursor, name, values)\n\t\telse:\n\t\t\traise ParserError(lookahead.cursor, \"Expected expression\")\n\n\t\treturn result\n\n\t# Parses zero or more dot (.), array indexing, or call suffix expressions and returns an AST subtree.\n\tdef parseExpressionSuffix(self) -> Node:\n\t\tfirst = self.parseExpressionSimple()\n\n\t\t# Parse zero or more path, indexer, or function application specifications.\n\t\twhile True:\n\t\t\tlookahead = self.fetch()\n\t\t\tif lookahead.kind == TokenKind.Dot:\n\t\t\t\ttoken = self.match(TokenKind.Dot)\n\t\t\t\tother = self.parseExpressionSimple()\n\t\t\t\tfirst = SelectorExpression(token.cursor, first, other)\n\t\t\telif lookahead.kind == TokenKind.BracketBegin:\n\t\t\t\traise ParserError(lookahead.cursor, \"Arrays are not supported yet\")\n\t\t\telif lookahead.kind == TokenKind.ParenthesisBegin:\n\t\t\t\targuments = self.parseArguments()\n\t\t\t\t# TODO: The cast of first into a Name is bogus! 
It only works for the simple language that we handle now...\n\t\t\t\tfirst = CallExpression(first.cursor, cast(Name, first), arguments)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\treturn first\n\n\t# Parses a 'function' definition.\n\tdef parseFunctionDefinition(self) -> Routine:\n\t\tstart = self.match(TokenKind.Keyword_Function)\n\t\tself.match(TokenKind.Space)\n\t\tlabel = self.parseName(NameKind.Function)\n\t\tparameters = self.parseParameters()\n\t\ttype = self.parseRoutineType()\n\t\tself.match(TokenKind.Colon)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\tself.match(TokenKind.Indent)\n\t\tbelow = self.parseRoutineBody()\n\t\tself.match(TokenKind.Dedent)\n\n\t\treturn Routine(start.cursor, RoutineKind.Function, type, label, parameters, below)\n\n\t# Parses a 'guard' definition.\n\tdef parseGuardDefinition(self) -> GuardDefinition:\n\t\t# Parse single-line guard, handle the optional getter and setter definitions below.\n\t\tstart = self.match(TokenKind.Keyword_Guard)\n\t\tself.match(TokenKind.Space)\n\t\tname = self.parseName(NameKind.Guard)\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Keyword_Is)\n\t\tself.match(TokenKind.Space)\n\t\tdirection = self.parseDirection()\n\t\tself.match(TokenKind.Space)\n\t\tsubject = self.parseExpressionSimple()\n\n\t\tbelow : List[Node] = []\n\n\t\t# TODO: Parse optional getter definition.\n\t\t# TODO: Parse optional setter definition.\n\n\t\treturn GuardDefinition(start.cursor, name, subject, below)\n\n\t# Parses an 'if' statement and returns a LIST containing the statement.\n\tdef _parseIfStatement(self) -> List[Node]:\n\t\t# Parse the 'if' line itself.\n\t\tstart = self.match(TokenKind.Keyword_If)\n\t\tself.match(TokenKind.Space)\n\t\tcondition = self.parseCondition()\n\t\tself.match(TokenKind.Colon)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\t# Parse statement block.\n\t\tself.match(TokenKind.Indent)\n\t\tstatements = self.parseRoutineBody(TokenKind.Keyword_If)\n\t\tself.match(TokenKind.Dedent)\n\n\t\t# Return an AST node for the parsed 'if' statement.\n\t\treturn [ IfStatement(start.cursor, condition, statements) ]\n\n\t# Parses zero or more 'elif' statements.\n\tdef _parseElifStatements(self) -> List[Node]:\n\t\tbelow : List[Node] = []\n\t\twhile self.fetch().kind == TokenKind.Keyword_Elif:\n\t\t\t# Parse the 'elif' line itself.\n\t\t\tstart = self.match(TokenKind.Keyword_Elif)\n\t\t\tself.match(TokenKind.Space)\n\t\t\tcondition = self.parseCondition()\n\t\t\tself.match(TokenKind.Colon)\n\t\t\tself.match(TokenKind.EndOfLine)\n\n\t\t\t# Parse statement block.\n\t\t\tself.match(TokenKind.Indent)\n\t\t\tstatements = self.parseRoutineBody(TokenKind.Keyword_If)\n\t\t\tself.match(TokenKind.Dedent)\n\n\t\t\t# Prepare to return the parsed 'elif' statement.\n\t\t\tbelow.append(ElifStatement(start.cursor, condition, statements))\n\n\t\treturn below\n\n\t# Parses an optional 'else' statement and returns a possibly empty LIST of the statement.\n\tdef _parseElseStatement(self) -> List[Node]:\n\t\tbelow : List[Node] = []\n\t\tif self.fetch().kind == TokenKind.Keyword_Else:\n\t\t\t# Parse the 'else' line itself.\n\t\t\tstart = self.match(TokenKind.Keyword_Else)\n\t\t\tself.match(TokenKind.Colon)\n\t\t\tself.match(TokenKind.EndOfLine)\n\n\t\t\t# Parse statement block.\n\t\t\tself.match(TokenKind.Indent)\n\t\t\tstatements = self.parseRoutineBody(TokenKind.Keyword_If)\n\t\t\tself.match(TokenKind.Dedent)\n\n\t\t\t# Prepare to return the parsed 'else' statement.\n\t\t\tbelow.append(ElseStatement(start.cursor, statements))\n\n\t\treturn below
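\n\n\t# For orientation, the three helpers above assemble a single IfElifElseStatement out of source shaped like\n\t# (sketch only; nested 'if' statements inside the blocks are rejected via parseRoutineBody's exclude argument):\n\t#\n\t#   if condition1:\n\t#       ...\n\t#   elif condition2:\n\t#       ...\n\t#   else:\n\t#       ...\n\n\t# Parses an 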
'if'/'elif'/'else' statement.\n\tdef parseIfElifElseStatement(self) -> IfElifElseStatement:\n\t\t# Parse and discard leading comment, if any.\n\t\tself.parseComments()\n\n\t\tbelow : List[Node] = []\n\n\t\t# Parse mandatory 'if' statement.\n\t\tbelow += self._parseIfStatement()\n\n\t\t# Parse optional 'elif' statement(s).\n\t\tbelow += self._parseElifStatements()\n\n\t\t# Parse optional 'else' statement.\n\t\tbelow += self._parseElseStatement()\n\n\t\treturn IfElifElseStatement(below[0].cursor, below)\n\n\t# Parses a 'let' statement.\n\tdef parseLetStatement(self) -> LetStatement:\n\t\tself.parseComments()\n\n\t\tstart = self.match(TokenKind.Keyword_Let)\n\t\tself.match(TokenKind.Space)\n\t\tname = self.parseExpressionSuffix()\n\t\tself.match(TokenKind.Space)\n\n\t\tassignment = self.fetch()\n\t\toperator = self.__assignments.get(assignment.kind)\n\t\tif not operator:\n\t\t\traise ParserError(assignment.cursor, \"Expected assignment operator\")\n\t\tself.match(assignment.kind)\n\t\tdel assignment\n\n\t\tself.match(TokenKind.Space)\n\t\texpression = self.parseExpression()\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\treturn LetStatement(start.cursor, operator, name, expression)\n\n\t# Parses a 'module' definition.\n\tdef parseModuleDefinition(self) -> ModuleDefinition:\n\t\t# Parse and discard module comment, if any.\n\t\tself.parseComments()\n\n\t\t# Parse mandatory 'module' statement.\n\t\tstart = self.match(TokenKind.Keyword_Module)\n\t\tself.match(TokenKind.Space)\n\t\tname = self.parseName(NameKind.Module)\n\t\tself.match(TokenKind.Colon)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\t# Parse blank line at the beginning of new group.\n\t\tself.parseBlank(1)\n\t\tself.match(TokenKind.Indent)\n\n\t\tactions = {\n\t\t\tTokenKind.Keyword_Class : self.parseClassDefinition,\n\t\t\tTokenKind.Keyword_Entry : self.parseEntryDefinition,\n\t\t\tTokenKind.Keyword_Function : self.parseFunctionDefinition,\n\t\t}\n\n\t\t# Parse all statements in the module.\n\t\tbelow = []\n\t\twhile True:\n\t\t\tself.parseComments()\n\n\t\t\tother = self.fetch()\n\t\t\tif other.kind not in actions:\n\t\t\t\tbreak\n\n\t\t\tbelow.append(actions[other.kind]())\n\t\t\tother = self.fetch()\n\n\t\t\t# Parse and discard trailing empty lines.\n\t\t\twhile other.kind == TokenKind.EndOfLine:\n\t\t\t\tself.match(other.kind)\n\t\t\t\tother = self.fetch()\n\n\t\t# Check that there is at least one statement in the block that we've just parsed.\n\t\tif not below:\n\t\t\traise ParserError(other.cursor, \"Expected grouping statement\")\n\n\t\tself.match(TokenKind.Dedent)\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\treturn ModuleDefinition(start.cursor, name, below)\n\n\t# Parses a name and returns a suitable AST node for the parsed identifier.\n\tdef parseName(self, namekind : NameKind) -> Name:\n\t\tlookahead = self.fetch()\n\t\tif lookahead.kind != TokenKind.Name:\n\t\t\traise ParserError(lookahead.cursor, \"Expected name\")\n\t\ttoken = self.match(lookahead.kind)\n\t\treturn Name(token.cursor, namekind, token.text)\n\n\t# Parses a single parameter in a parameter list.\n\tdef parseParameter(self) -> Parameter:\n\t\tname = self.parseName(NameKind.Parameter)\n\t\ttype = self.parseTypeDesignator()\n\t\treturn Parameter(name.cursor, name, type)
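\n\n\t# For reference, a minimal module accepted by parseModuleDefinition above looks roughly like this (a sketch\n\t# inferred from the grammar; the exact spacing and blank-line rules are enforced by the lexer and parseBlank):\n\t#\n\t#   module main:\n\t#\n\t#       function double(first is cardinal4) is cardinal4:\n\t#           return first * 2\n\n\t# Parses an optional list of formal parameters and the enclosing parentheses, and returns a list of them.\n\tdef parseParameters(self) -> List[Parameter]:\n\t\tresult = []\n\t\tstart = self.match(TokenKind.ParenthesisBegin)\n\t\tif self.fetch().kind != TokenKind.ParenthesisClose:\n\t\t\t# If one or more 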
# Parses an optional list of formal parameters and the enclosing parentheses, and returns a list of them.\n\tdef parseParameters(self) -> List[Parameter]:\n\t\tresult = []\n\t\tstart = self.match(TokenKind.ParenthesisBegin)\n\t\tif self.fetch().kind != TokenKind.ParenthesisClose:\n\t\t\t# If one or more parameters are present.\n\t\t\tresult.append(self.parseParameter())\n\t\t\twhile self.fetch().kind == TokenKind.Comma:\n\t\t\t\tself.match(TokenKind.Comma)\n\t\t\t\tself.match(TokenKind.Space)\n\t\t\t\tresult.append(self.parseParameter())\n\t\tself.match(TokenKind.ParenthesisClose)\n\t\treturn result\n\n\t# Parses a path of one or more names separated by dots and returns the result.\n\tdef parsePath(self, namekind) -> List[Name]:\n\t\tnames = [ self.parseName(namekind) ]\n\t\twhile self.fetch().kind == TokenKind.Dot:\n\t\t\tself.match(TokenKind.Dot)\n\t\t\tnames.append(self.parseName(namekind))\n\t\treturn names\n\n\t# Parses a 'return' statement.\n\tdef parseReturnStatement(self) -> ReturnStatement:\n\t\tstart = self.match(TokenKind.Keyword_Return)\n\n\t\t# Parse optional return expression.\n\t\tif self.fetch().kind == TokenKind.Space:\n\t\t\tself.match(TokenKind.Space)\n\t\t\texpression = self.parseExpression()\n\t\telse:\n\t\t\texpression = VoidExpression(self.fetch().cursor, VoidType(self.fetch().cursor))\n\n\t\tself.match(TokenKind.EndOfLine)\n\n\t\treturn ReturnStatement(start.cursor, expression)\n\n\t# Parses the body of a routine and returns a list of the statements in that body.\n\tdef parseRoutineBody(self, exclude : Optional[TokenKind] = None) -> List[Node]:\n\t\t# Save the start of the routine for use when we create a node for it below.\n\t\tstart = self.fetch()\n\n\t\tbelow = []\n\n\t\tactions = {\n\t\t\tTokenKind.Keyword_Call : self.parseCallStatement,\n\t\t\tTokenKind.Keyword_Create : self.parseCreateStatement,\n\t\t\tTokenKind.Keyword_If : self.parseIfElifElseStatement,\n\t\t\tTokenKind.Keyword_Let : self.parseLetStatement,\n\t\t\tTokenKind.Keyword_Return : self.parseReturnStatement,\n\t\t\tTokenKind.Keyword_Throw : self.parseThrowStatement,\n\t\t}\n\n\t\t# Disallow nested compound statements to keep Braceless code readable (this is a FEATURE!).\n\t\tif exclude:\n\t\t\tdel actions[exclude]\n\t\t\tdel exclude\n\n\t\t# Parse all statements in the routine's body.\n\t\twhile True:\n\t\t\tself.parseComments()\n\n\t\t\tother = self.fetch()\n\t\t\tif not other.kind in actions:\n\t\t\t\tbreak\n\n\t\t\tbelow.append(actions[other.kind]())\n\t\t\tother = self.fetch()\n\n\t\t\tif other.kind == TokenKind.EndOfLine:\n\t\t\t\tself.match(TokenKind.EndOfLine)\n\n\t\t# Check that there is at least one statement in the block that we've just parsed.\n\t\tif not below:\n\t\t\traise ParserError(other.cursor, \"Expected routine body statement\")\n\n\t\treturn below\n\n\t# Parses the optional return type designator for a routine such as an entry point or a function.\n\tdef parseRoutineType(self) -> Node:\n\t\tif self.fetch().kind == TokenKind.Space:\n\t\t\ttype = self.parseTypeDesignator()\n\t\telse:\n\t\t\ttype = VoidType(self.fetch().cursor)\n\t\treturn type\n\n\t# Parses a single constructor set statement: a := x.\n\tdef parseSetStatement(self) -> SetStatement:\n\t\tname = self.parseExpressionSimple()\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Operator_ColonEqual)\n\t\tself.match(TokenKind.Space)\n\t\tvalue = self.parseExpression()\n\t\treturn SetStatement(name.cursor, AssignmentKind.Identity, name, value)
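# Illustrative sketch (not part of the compiler): parseSetStatements below accepts
# 'a := x, b := y' but rejects a trailing comma just before the closing parenthesis.
# The same shape on a plain token list, with hypothetical names:
def parse_comma_list(tokens):
	result = []
	index = 0
	while tokens[index] != ")":
		result.append(tokens[index])	# Stands in for parseSetStatement().
		index += 1
		if tokens[index] != ")":
			assert tokens[index] == ","	# Separator between two set statements.
			index += 1
			if tokens[index] == ")":
				raise ValueError("Trailing comma before ')'")
	return result

assert parse_comma_list([ "a", ",", "b", ")" ]) == [ "a", "b" ]
try:
	parse_comma_list([ "a", ",", ")" ])
	assert False
except ValueError:
	pass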
# Parses a list of constructor set statements: (a := x, b := y, c := z).\n\tdef parseSetStatements(self) -> List[SetStatement]:\n\t\tself.match(TokenKind.ParenthesisBegin)\n\t\tresult : List[SetStatement] = []\n\t\twhile self.fetch().kind != TokenKind.ParenthesisClose:\n\t\t\tresult.append(self.parseSetStatement())\n\n\t\t\tif self.fetch().kind != TokenKind.ParenthesisClose:\n\t\t\t\tself.match(TokenKind.Comma)\n\t\t\t\tself.match(TokenKind.Space)\n\n\t\t\t\tif self.fetch().kind == TokenKind.ParenthesisClose:\n\t\t\t\t\traise ParserError(self.fetch().cursor, \"Trailing garbage in constructor guard invocation list\")\n\t\tself.match(TokenKind.ParenthesisClose)\n\t\treturn result\n\n\t# Parses a 'throw' statement, which always includes an expression.\n\tdef parseThrowStatement(self) -> Node:\n\t\tstart = self.match(TokenKind.Keyword_Throw)\n\t\tself.match(TokenKind.Space)\n\t\texpression = self.parseExpression()\n\t\treturn ThrowStatement(start.cursor, expression)\n\n\t# Parses a type and returns an AST subtree expressing that type.\n\tdef parseType(self) -> Type:\n\t\tlookahead = self.fetch()\n\t\tresult : Node\n\t\tcardinals = {\n\t\t\tTokenKind.Keyword_Cardinal1 : 1 * BITS_PER_BYTE,\n\t\t\tTokenKind.Keyword_Cardinal2 : 2 * BITS_PER_BYTE,\n\t\t\tTokenKind.Keyword_Cardinal4 : 4 * BITS_PER_BYTE,\n\t\t\tTokenKind.Keyword_Cardinal8 : 8 * BITS_PER_BYTE\n\t\t}\n\t\tif lookahead.kind == TokenKind.Keyword_Boolean:\n\t\t\tstart = self.match(TokenKind.Keyword_Boolean)\n\t\t\tresult = BooleanType(start.cursor, BITS_PER_BOOLEAN)\n\t\telif lookahead.kind in cardinals:\n\t\t\tstart = self.match(lookahead.kind)\n\t\t\tresult = CardinalType(start.cursor, cardinals[lookahead.kind])\n\t\telif lookahead.kind == TokenKind.Name:\n\t\t\tname = self.parseName(NameKind.Type)\n\t\t\tresult = TypeName(name.cursor, name)\n\t\telse:\n\t\t\traise ParserError(lookahead.cursor, \"Expected type\")\n\t\treturn result\n\n\t# Parses a type designator (' is TYPE') and returns an AST subtree expressing the type.\n\tdef parseTypeDesignator(self) -> Node:\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Keyword_Is)\n\t\tself.match(TokenKind.Space)\n\t\treturn self.parseType()\n\n\t# Parses a 'field' definition.\n\tdef parseFieldDefinition(self) -> FieldDefinition:\n\t\t# Parse single-line field.\n\t\tstart = self.match(TokenKind.Keyword_Field)\n\t\tself.match(TokenKind.Space)\n\t\tname = self.parseName(NameKind.Field)\n\t\tself.match(TokenKind.Space)\n\t\tself.match(TokenKind.Keyword_Is)\n\t\tself.match(TokenKind.Space)\n\t\ttype = self.parseType()\n\n\t\treturn FieldDefinition(start.cursor, name, type)\n\n\n#************************************************************* Mangler/Encoder ***************************************************\n\n# A visitor that mangles (encodes) names so as to allow easy resolution of overloaded names (NOT A PASS!).\nclass Mangler(Visitor):\n\n\tdef __init__(self) -> None:\n\t\tself.__sep = '$'\n\t\tself.__result = self.__sep + 'b0' + self.__sep\n\t\tself.__widths = { 1 : 0, 2 : 1, 4 : 2, 8 : 3, 16 : 4, 32 : 5, 64 : 6, 128 : 7 }\n\n\t# Returns the currently mangled name AND resets the buffer in preparation for the next mangling!\n\t@property\n\tdef result(self) -> str:\n\t\tresult = self.__result\n\t\tself.__result = self.__sep + 'b0' + self.__sep\n\t\treturn result\n\n\tdef __add(self, text : str) -> None:\n\t\tself.__result += text\n\n\t# Converts a width specification in bits (a power of two) into its encoded index, i.e. the base-2 logarithm of the width.\n\tdef __encodeWidth(self, width : int) -> int:\n\t\treturn self.__widths[width]
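# Illustrative sketch (not part of the compiler): the visitors below build mangled names
# of the form '$b0$' + kind letter + name + '$' + one code per parameter type, so that
# overloaded routines resolve to distinct symbols.  Assuming cardinal parameters only and
# using hypothetical names, a routine 'sum(u32, u32)' would mangle as '$b0$rsum$u5u5',
# where 5 is the base-2 logarithm of the 32-bit width:
import math

def mangle_routine(name, parameter_widths):
	# 'r' marks a routine, 'u' a cardinal parameter with its width encoded as log2(bits).
	codes = "".join("u%x" % int(math.log2(width)) for width in parameter_widths)
	return "$b0$" + "r" + name + "$" + codes

assert mangle_routine("sum", [ 32, 32 ]) == "$b0$rsum$u5u5"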
def _visitRoutine(self, that : Routine, argument : Any) -> Any:\n\t\tthat.name.visit(self, argument)\n\n\t\tself.__add(self.__sep)\n\t\tfor parameter in that.parameters:\n\t\t\tparameter.visit(self, argument)\n\n\t\t# Don't include the return type as Braceless does not allow overloading solely on the return type.\n\t\tif False:\n\t\t\tself.__add(self.__sep)\n\t\t\tthat.type.visit(self, argument)\n\n\tdef visitBinaryExpression(self, that : BinaryExpression, prefix : bool = True) -> None:\n\t\tself.__add('o')\n\t\tself.__add(OPERATOR_ENCODINGS[that.operator_kind])\n\n\t\tself.__add(self.__sep)\n\t\tassert(that.first.type is not None)\n\t\tthat.first.type.visit(self, False)\n\t\tassert(that.other.type is not None)\n\t\tthat.other.type.visit(self, False)\n\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> None:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> Any:\n\t\tself.__add(\"b%x\" % self.__encodeWidth(that.width))\n\n\tdef visitCallExpression(self, that : CallExpression, prefix : bool) -> None:\n\t\t# r = routine, any routine as we cannot encode the function type (method, function, helper, etc.).\n\t\tif prefix:\n\t\t\tself.__add('r')\n\t\t\tself.__add(that.name.text)\n\t\t\tself.__add(self.__sep)\n\n\t\tfor argument in that.arguments:\n\t\t\targument.visit(self, False)\n\n\tdef visitCallStatement(self, that : CallStatement, prefix : bool) -> None:\n\t\tif prefix:\n\t\t\tself.__add('r')\n\t\t\tself.__add(that.name.text)\n\t\t\tself.__add(self.__sep)\n\n\t\tfor argument in that.arguments:\n\t\t\targument.visit(self, False)\n\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> None:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> None:\n\t\tself.__add(\"u%x\" % self.__encodeWidth(that.width))\n\n\tdef visitName(self, that : Name, type : Node) -> None:\n\t\tself.__add(that.text)\n\n\tdef visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> None:\n\t\t# o = operator\n\t\tself.__add('o')\n\t\tself.__add(OPERATOR_ENCODINGS[that.operator_kind])\n\n\t\tself.__add(self.__sep)\n\t\tfor parameter in that.parameters:\n\t\t\tparameter.visit(self, argument)\n\n\tdef visitParameter(self, that : Parameter, argument : Any) -> None:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitReference(self, that : Reference, argument : Any) -> None:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> None:\n\t\t# r = routine, any routine as we cannot encode the function type (method, function, helper, etc.).\n\t\tself.__add('r')\n\t\tself._visitRoutine(that, argument)\n\n\tdef visitSelectorExpression(self, that : SelectorExpression, argument : Any) -> None:\n\t\traise InternalError(\"Not implemented\")\n\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> None:\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\t\tthat.third.visit(self, argument)\n\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> None:\n\t\tself.__add('v')\n\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> None:\n\t\tthat.first.visit(self, argument)\n\n\n#************************************************* Resolver (Symbol and Type Identification) **************************************\n\n# Walks the AST and resolves all modules, classes, and routines using the supplied symbol table.\nclass OuterResolver(Visitor):\n\n\tdef __init__(self, symbols : SymbolTable) -> None:\n\t\tself.__symbols = symbols\n\t\tself.__mangler = Mangler()\n\n\tdef __mangle(self, node : Node) -> str:\n\t\tself.__mangler.visit(node, 
True)\n\t\treturn self.__mangler.result\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> None:\n\t\tself.__symbols.enter(that.name.cursor, that.name.text, that)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__symbols.leave(that.name.cursor, that.name.text, that)\n\n\tdef visitFieldDefinition(self, that : FieldDefinition, argument : Any) -> None:\n\t\tself.__symbols.insert(that.name.cursor, that.name.text, that)\n\n\tdef visitGuardDefinition(self, that : GuardDefinition, argument : Any) -> None:\n\t\tself.__symbols.insert(that.name.cursor, that.name.text, that)\n\n\tdef visitModuleDefinition(self, that : ModuleDefinition, argument : Any) -> None:\n\t\tself.__symbols.insert(that.name.cursor, that.name.text, that)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> None:\n\t\tself.__symbols.insert(that.cursor, OPERATOR_ENCODINGS[that.operator_kind], that)\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> None:\n\t\tself.__symbols.insert(that.cursor, that.name.text, that)\n\n\n# Walks the AST and resolves all locals using the supplied symbol table.\nclass InnerResolver(Visitor):\n\n\tdef __init__(self, symbols : SymbolTable) -> None:\n\t\tself.__symbols = symbols\n\n\tdef __mangle(self, node : Node) -> str:\n\t\tmangler = Mangler()\n\t\tmangler.visit(node, True)\n\t\treturn mangler.result\n\n\tdef visitAbsoluteExpression(self, that : AbsoluteExpression, argument : Any) -> Node:\n\t\tthat.type = that.first.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> Node:\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\n\t\t# Look up binary operator - these are already mangled in the constructor of 'Environment'.\n\t\tmangled = self.__mangle(that)\n\t\tthat.type = self.__symbols.locate(that.cursor, mangled).type\n\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\treturn that.type\n\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> Node:\n\t\treturn that\n\n\tdef visitCallExpression(self, that : CallExpression, argument : Any) -> Node:\n\t\tthat.name.visit(self, argument)\n\n\t\tfor argument1 in that.arguments:\n\t\t\targument1.visit(self, argument)\n\n\t\troutine_type = self.__symbols.locate(that.cursor, that.name.text)\n\t\tassert(isinstance(routine_type, Routine))\n\t\tthat.type = routine_type.type\n\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitCallStatement(self, that : CallStatement, argument : Any) -> None:\n\t\tthat.name.visit(self, argument)\n\t\tfor argument1 in that.arguments:\n\t\t\targument1.visit(self, argument)\n\n\t\troutine_type = self.__symbols.locate(that.cursor, that.name.text)\n\t\tassert(isinstance(routine_type, Routine))\n\t\tthat.type = routine_type\n\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\treturn that.type\n\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> Node:\n\t\treturn 
that\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> None:\n\t\tself.__symbols.enter(that.cursor, that.name.text, that)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__symbols.leave(that.cursor, that.name.text, that)\n\n\tdef visitConstructorExpression(self, that : ConstructorExpression, argument : Any) -> Node:\n\t\tassert(isinstance(that.name, Name))\n\t\tthat.type = self.__symbols.locate(that.cursor, that.name.text)\n\t\tassert(that.type is not None)\n\t\t# NOTE: The type has already been assigned, so don't try to reassign it!\n\t\t# that.type.visit(self, argument)\n\t\tfor value in that.values:\n\t\t\tvalue.visit(self, argument)\n\t\treturn that.type\n\n\tdef visitCreateStatement(self, that : CreateStatement, argument : Any) -> None:\n\t\tthat.name.visit(self, that.type)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitElifStatement(self, that : ElifStatement, argument : Any) -> None:\n\t\tthat.condition.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitElseStatement(self, that : ElseStatement, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitFieldDefinition(self, that : FieldDefinition, argument : Any) -> None:\n\t\tthat.name.visit(self, that.type)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\n\tdef visitGuardDefinition(self, that : GuardDefinition, argument : Any) -> None:\n\t\ttype = that.subject.visit(self, argument)\n\t\tthat.name.visit(self, type)\n\n\tdef visitIfElifElseStatement(self, that : IfElifElseStatement, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitIfStatement(self, that : IfStatement, argument : Any) -> None:\n\t\tthat.condition.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitLetStatement(self, that : LetStatement, argument : Any) -> None:\n\t\tthat.name.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitModuleDefinition(self, that : ModuleDefinition, argument : Any) -> None:\n\t\tself.__symbols.locate(that.name.cursor, that.name.text)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitName(self, that : Name, type : Node) -> Node:\n\t\tself.__symbols.insert(that.cursor, that.text, type)\n\t\tthat.type = type\n\t\treturn type\n\n\tdef visitNoneExpression(self, that : NoneExpression, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitNoneType(self, that : NoneType, argument : Any) -> Node:\n\t\treturn that\n\n\tdef visitParameter(self, that : Parameter, argument : Any) -> None:\n\t\tthat.name.visit(self, that.type)\n\n\tdef visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> None:\n\t\tencoding = OPERATOR_ENCODINGS[that.operator_kind]\n\t\tself.__symbols.enter(that.cursor, encoding, that)\n\t\tfor parameter in that.parameters:\n\t\t\tparameter.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__symbols.leave(that.cursor, encoding, that)\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitReference(self, that : Reference, argument : Any) -> Node:\n\t\tthat.type = self.__symbols.locate(that.cursor, 
that.text)\n\t\treturn that.type\n\n\tdef visitRelativeExpression(self, that : RelativeExpression, argument : Any) -> Node:\n\t\tthat.type = self.__symbols.locate(that.name.cursor, that.name.text)\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitReturnStatement(self, that : ReturnStatement, argument : Any) -> None:\n\t\tthat.type = that.expression.visit(self, argument)\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> None:\n\t\tself.__symbols.enter(that.cursor, that.name.text, that)\n\t\tfor parameter in that.parameters:\n\t\t\tparameter.visit(self, argument)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__symbols.leave(that.cursor, that.name.text, that)\n\n\tdef visitSelectorExpression(self, that : SelectorExpression, argument : Any) -> None:\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\n\tdef visitSetStatement(self, that : SetStatement, argument : Any) -> None:\n\t\tthat.name.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> Node:\n\t\tthat.first.visit(self, argument)\n\t\tthat.other.visit(self, argument)\n\t\tthat.third.visit(self, argument)\n\t\tassert(that.other.type is not None)\n\t\treturn that.other.type\n\n\tdef visitThrowStatement(self, that : ThrowStatement, argument : Any) -> None:\n\t\tthat.type = that.expression.visit(self, argument)\n\n\tdef visitTypeName(self, that : TypeName, argument : Any) -> Node:\n\t\tthat.type = that.name.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> Node:\n\t\tthat.type = that.first.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitVoidExpression(self, that : VoidExpression, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> Node:\n\t\treturn that\n\n\n#********************************************************* Checker (Static Checks) ***********************************************\n\n# Exception class used to report errors discovered while performing static type checking.\nclass CheckerError(CursorError):\n\n\tdef __init__(self, cursor, text) -> None:\n\t\tCursorError.__init__(self, cursor, text)\n\n\tdef __str__(self) -> str:\n\t\treturn \"%s Error: %s\" % ( self.cursor, self.text )\n\n\n# Visitor class that performs static checks.\nclass Checker(Visitor):\n\n\tdef __init__(self) -> None:\n\t\tself.__module : Optional[ModuleDefinition] = None\n\t\tself.__entries : List[Routine] = []\n\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> Node:\n\t\tfirst_type = that.first.visit(self, argument)\n\t\tother_type = that.other.visit(self, argument)\n\t\tassert(first_type != None and other_type != None)\n\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\t\tif False:\n\t\t\tif first_type.type_kind != other_type.type_kind:\n\t\t\t\traise CheckerError(that.cursor, \"Types of arguments to binary operators must currently be identical\")\n\n\t\t\treturn first_type\n\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\treturn that.type.visit(self, argument)\n\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> Node:\n\t\treturn that\n\n\tdef visitCallExpression(self, that : CallExpression, dummy : Any) 
-> Node:\n\t\t# NOTE: A parameter is a formal parameter declaration, an argument is something that is passed into a routine.\n\t\troutine : Routine = cast(Routine, that.name.type)\n\n\t\t# Check that the number of arguments matches the number of parameters.\n\t\tif len(that.arguments) != len(routine.parameters):\n\t\t\traise CheckerError(that.cursor, \"Wrong number of arguments in call to routine '\" + routine.name.text + \"'\")\n\n\t\t# Check each parameter and argument in turn.\n\t\tfor index in range(len(that.arguments)):\n\t\t\tparameter = routine.parameters[index]\n\t\t\targument = that.arguments[index]\n\t\t\tif parameter.type_kind != argument.type_kind:\n\t\t\t\traise CheckerError(that.cursor, \"Argument has wrong type for parameter '\" + parameter.name.text + \"'\")\n\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitCallStatement(self, that : CallStatement, dummy : Any) -> None:\n\t\t# NOTE: A parameter is a formal parameter declaration, an argument is something that is passed into a routine.\n\t\troutine : Routine = cast(Routine, that.type)\n\n\t\t# Check that the number of arguments matches the number of parameters.\n\t\tif len(that.arguments) != len(routine.parameters):\n\t\t\traise CheckerError(that.cursor, \"Wrong number of arguments in call to routine '\" + routine.name.text + \"'\")\n\n\t\t# Check each parameter and argument in turn.\n\t\tfor index in range(len(that.arguments)):\n\t\t\tparameter = routine.parameters[index]\n\t\t\targument = that.arguments[index]\n\t\t\tif parameter.type_kind != argument.type_kind:\n\t\t\t\traise CheckerError(that.cursor, \"Argument has wrong type for parameter '\" + parameter.name.text + \"'\")\n\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\treturn that.type.visit(self, argument)\n\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> Type:\n\t\treturn that\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> None:\n\t\t# TODO: Finish up Checker.visitClassDefinition.\n\t\tpass\n\n\tdef visitCreateStatement(self, that : CreateStatement, argument : Any) -> None:\n\t\tthat.name.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\texpression_type = that.expression.visit(self, argument)\n\n\t\tif expression_type.type_kind != that.type.type_kind:\n\t\t\traise CheckerError(that.cursor, \"Type mismatch between variable and its initializer\")\n\n\tdef visitElifStatement(self, that : ElifStatement, argument : Any) -> None:\n\t\ttype = that.condition.visit(self, argument)\n\t\tif type.type_kind != TypeKind.Boolean:\n\t\t\traise CheckerError(that.cursor, \"'elif' statement takes a boolean condition\")\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitElseStatement(self, that : ElseStatement, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitIfElifElseStatement(self, that : IfElifElseStatement, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitIfStatement(self, that : IfStatement, argument : Any) -> None:\n\t\ttype = that.condition.visit(self, argument)\n\t\tif type.type_kind != TypeKind.Boolean:\n\t\t\traise CheckerError(that.cursor, \"'if' statement takes a boolean condition\")\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitLetStatement(self, that : LetStatement, argument : Any) -> 
None:\n\t\tthat.name.visit(self, argument)\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitModuleDefinition(self, that : ModuleDefinition, argument : Any) -> None:\n\t\t# Check that there is at most one module statement\n\t\tif self.__module:\n\t\t\traise CheckerError(that.cursor, \"Extraneous 'module' statement detected\")\n\t\tself.__module = that\n\n\t\t# Check that the specified module name matches the actual disk file name.\n\t\tbasename = os.path.split(that.cursor.file)[1]\n\t\tbasename = os.path.splitext(basename)[0]\n\t\tif that.name.text != basename:\n\t\t\traise CheckerError(that.cursor, \"Module name differs from source file base name\")\n\n\t\t# Check that the module name is written in camel case (TODO: Improve!).\n\t\tif basename[0].islower():\n\t\t\traise CheckerError(that.cursor, \"Module name must be written in CamelCase\")\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitName(self, that : Name, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> None:\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitReference(self, that : Reference, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\treturn that.type\n\n\tdef visitReturnStatement(self, that : ReturnStatement, argument : Any) -> None:\n\t\t# Find the parent routine's return type.\n\t\tparent_routine = that.findover('Routine')\n\t\tif not parent_routine:\n\t\t\traise InternalError(\"Unable to locate routine containing 'return' statement: %s\" % that)\n\n\t\treturn_type = parent_routine.type\n\t\tassert(that.type is not None)\n\t\tassert(return_type is not None)\n\t\tif that.type.type_kind != return_type.type_kind:\n\t\t\traise CheckerError(that.cursor, \"Type mismatch - 'return' expression type is different from routine return type\")\n\n\t\t# Check the return expression itself.\n\t\tthat.expression.visit(self, argument)\n\n\tdef visitRoutine(self, that : Routine, argument : Any) -> None:\n\t\tif that.routine_kind == RoutineKind.Entry:\n\t\t\t# Check that all entry points return a boolean indicating success or failure.\n\t\t\tif that.type_kind != TypeKind.Boolean:\n\t\t\t\traise CheckerError(that.cursor, \"Return type of 'entry' statement must be 'boolean'\")\n\n\t\t\t# Check that the entry point hasn't already been defined.\n\t\t\tif that.name.text in map(lambda x: x.name.text, self.__entries):\n\t\t\t\traise CheckerError(that.cursor, \"Entry point already defined: \" + that.name.text)\n\n\t\t\t# Save this entry for later analysis of which entry is the actual entry point.\n\t\t\tself.__entries.append(that)\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> Node:\n\t\tfirst_type = that.first.visit(self, argument)\n\t\tother_type = that.other.visit(self, argument)\n\t\tthird_type = that.third.visit(self, argument)\n\n\t\tif first_type.type_kind != TypeKind.Boolean:\n\t\t\traise CheckerError(that.cursor, \"First expression of tertiary operator must be a condition (boolean)\")\n\n\t\tif other_type.type_kind != third_type.type_kind:\n\t\t\traise CheckerError(that.cursor, \"Second and third arguments to tertiary operator must be of same type\")\n\n\t\treturn other_type\n\n\tdef 
visitThrowStatement(self, that : ThrowStatement, argument : Any) -> None:\n\t\t# TODO: Check 'throw' statements, they must throw a subclass of Braceless0.Exception.\n\t\tpass\n\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> Node:\n\t\ttype = that.first.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\treturn type\n\n\tdef visitVoidExpression(self, that : VoidExpression, argument : Any) -> Node:\n\t\tassert(that.type is not None)\n\t\tthat.type.visit(self, argument)\n\t\treturn that.type\n\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> Node:\n\t\treturn that\n\n\n#********************************************************* C Writer (Backend) ****************************************************\n\nC_MAIN_TEMPLATE = \"\"\"\nint main(int argc, const char *argv[])\n{\n\tException* __b0_status = %(ENTRY)s();\n\n\tif (__b0_status != NULL)\n\t{\n\t\t/* TODO: Dump the call stack (somehow). */\n\t\tfprintf(stderr, \"Unhandled exception: %%u\\\\n\", __b0_status->code);\n\t}\n\n\treturn (__b0_status == NULL) ? EXIT_SUCCESS : EXIT_FAILURE;\n}\n\"\"\".strip()\n\nclass CWriter(Visitor):\n\n\tdef __init__(self, setup, writer) -> None:\n\t\t# Save the global configuration as we need it from time to time.\n\t\tself.__setup = setup\n\t\tself.__writer = writer\n\n\t\tself.__printer = Printer()\n\n\t\tself.__operators = {\n\t\t\tOperatorKind.Algebraic_Addition : \"+\",\n\t\t\tOperatorKind.Algebraic_Division : \"/\",\n\t\t\tOperatorKind.Algebraic_Multiplication : \"*\",\n\t\t\tOperatorKind.Algebraic_Remainder : \"%\",\n\t\t\tOperatorKind.Algebraic_Subtraction : \"-\",\n\t\t\tOperatorKind.Logical_And : \"&&\",\n\t\t\tOperatorKind.Logical_Or : \"||\",\n\t\t\tOperatorKind.Logical_Xor : \"^\",\n\t\t\tOperatorKind.Relational_Difference : \"!=\",\n\t\t\tOperatorKind.Relational_Equality : \"==\",\n\t\t\tOperatorKind.Relational_GreaterEqual : \">=\",\n\t\t\tOperatorKind.Relational_GreaterThan : \">\",\n\t\t\tOperatorKind.Relational_LessEqual : \"<=\",\n\t\t\tOperatorKind.Relational_LessThan : \"<\",\n\t\t}\n\n\t\tself.__reset()\n\n\t# Pretty-prints the specified AST subtree as Braceless0 source code in a comment.\n\tdef __print(self, node : Node) -> None:\n\t\tself.__writer.flush(\"/*: \" + self.__printer.visit(node) + \" */\")\n\n\t# Resets the per-routine definitions in the C writer.\n\tdef __reset(self) -> None:\n\t\tself.__names : Dict[str, int] = {}\n\n\tdef __unique(self, name):\n\t\tindex = self.__names.get(name)\n\t\tif not index:\n\t\t\tself.__names[name] = 1\n\t\t\treturn name\n\n\t\tself.__names[name] += 1\n\t\treturn name + \"_\" + str(index)\n\n\t# Writes a list of parameters to the class-wide output stream.\n\tdef _writeParameters(self, that : Routine, result_tuple : Any) -> None:\n\t\tself.__writer.write(\"(\")\n\n\t\t# Write the user parameters.\n\t\tfor parameter in that.parameters:\n\t\t\tif parameter != that.parameters[0]:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tparameter.visit(self, None)\n\n\t\t# Write the Braceless0 return value parameter.\n\t\t( kind, type ) = result_tuple\n\t\tif kind != TypeKind.Void:\n\t\t\tif len(that.parameters) > 0:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tself.__writer.write(\"%s* __b0_result\" % type)\n\n\t\tself.__writer.write(\")\")
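# Illustrative sketch (not part of the compiler): every generated routine returns an
# 'Exception*' so the caller can propagate errors, which means the declared return type
# is passed back through a trailing out-parameter instead, exactly as _writeParameters
# above arranges.  A standalone rendering of that calling convention (hypothetical names):
def render_signature(name, parameters, return_type):
	formals = [ "%s %s" % ( ptype, pname ) for ( pname, ptype ) in parameters ]
	if return_type != "void":
		formals.append("%s* __b0_result" % return_type)
	return "Exception* %s(%s)" % ( name, ", ".join(formals) )

assert render_signature("f", [ ( "x", "bool" ) ], "void") == "Exception* f(bool x)"
assert render_signature("g", [], "bool") == "Exception* g(bool* __b0_result)"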
# Outputs a binary expression.\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> str:\n\t\topcode = self.__operators.get(that.operator_kind)\n\t\tif not opcode:\n\t\t\traise InternalError(\"Unknown operator: %s\" % that.operator_kind)\n\n\t\t# TODO: evaluate first (exception aware), then other (...), and then finally call the operator (...).\n\t\tresult = that.first.visit(self, argument)\n\t\tresult += \" %s \" % opcode\n\t\tresult += that.other.visit(self, argument)\n\t\treturn result\n\n\t# Outputs a boolean literal expression.\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> str:\n\t\treturn { False : 'false', True : 'true' }[that.value]\n\n\t# Outputs a boolean type.\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> str:\n\t\treturn \"bool\"\n\n\t# Outputs a call to a function.\n\tdef visitCallExpression(self, that : CallExpression, argument : Any) -> None:\n\t\t# Allocate the 'result' variable.\n\n\t\t# Output start of exception propagation code.\n\t\tself.__writer.write(\"__b0_status = \")\n\n\t\t# Output name of the callee function.\n\t\tself.__writer.write(that.name.visit(self, argument))\n\n\t\t# Output the user-defined arguments.\n\t\tself.__writer.write(\"(\")\n\t\tfor argument1 in that.arguments:\n\t\t\tif argument1 != that.arguments[0]:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tself.__writer.write(argument1.visit(self, argument))\n\n\t\t# Output the synthetic 'result' argument.\n\t\tassert(that.type is not None)\n\t\tif that.type.type_kind != TypeKind.Void:\n\t\t\tif len(that.arguments) > 0:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tresult = self.__unique(\"result\")\n\t\t\tself.__writer.write(\"&%s\" % result)\n\t\t\tresult_type = that.type.visit(self, argument)\n\t\telse:\n\t\t\tresult_type = None\n\n\t\tself.__writer.write(\")\")\n\n\t\t# Output exception propagation code.\n\t\t# TODO: The lines below are an unfinished port from LlvmWriter.visitCallExpression and still emit LLVM IR rather than C, so they stay disabled.\n\t\tsuccess = self.__unique(\"success\")\n\t\t#self.__writer.flush(\"; Exception-propagation code.\")\n\t\t#self.__writer.flush(\"%s = icmp eq %%Exception* %s, null\" % ( success, status ))\n\t\tlabel = self.__unique('return')\n\t\t#self.__writer.flush(\"br i1 %s, label %s, label %s\" % ( success, self.__format_label_reference(label), self.__format_label_reference(\"epilogue\") ))\n\t\tself.__writer.flush()\n\n\t\t# Output load of routine return value.\n\t\t#self.enterBlock(label)\n\t\tactive : Optional[str] = None\n\t\tif result_type:\n\t\t\tself.__writer.flush(\"/* Return the result */\")\n\t\t\t#active = self.allocate_value(\"result\")\n\t\t\t#self.__writer.flush(\"%s = load %s, %s %s\" % ( active, result_type, self.__format_pointer(result_type), result_temp ))\n\n\t\t#return active\n\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> None:\n\t\tself.__writer.flush(\"/*\")\n\t\tself.__writer.indent()\n\t\tself.__writer.flush(\"Generator: %s\" % banner_message())\n\t\tself.__writer.flush(\"PLEASE DO NOT EDIT THIS FILE! REPORT THE ISSUE INSTEAD!\")\n\t\tself.__writer.flush()\n\n\t\t# Descend towards bottom of tree.\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__reset()\n\n\t\t# TODO: Determine THE ONE AND ONLY entry point if no explicit entry point has been specified using the '-entry' option.\n\t\tif not self.__setup['target.entry']:\n\t\t\traise SetupError(\"No entry point given using the -entry option\")\n\n\t\t# Output a custom main() function so that the program can be linked and executed.\n\t\tself.__writer.flush(\"/* Generated main() function. */\")\n\t\tself.__writer.flush(C_MAIN_TEMPLATE % { 'ENTRY' : self.__setup['target.entry'] })\n\n\n#***************************************************** LLVM Writer (Backend) *****************************************************\n\nSYSTEM_LINUX_ARM_32 = \"\"\"\n; The standard Clang configuration for Linux ARM32 (armv7hf) machines. 
DO NOT EDIT UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING!\n\ntarget datalayout = \"e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64\"\ntarget triple = \"armv7-unknown-linux-gnueabihf\"\n\nattributes #0 = {\n\tnounwind\n\t\"correctly-rounded-divide-sqrt-fp-math\"=\"false\"\n\t\"disable-tail-calls\"=\"false\"\n\t\"less-precise-fpmad\"=\"false\"\n\t\"min-legal-vector-width\"=\"0\"\n\t\"no-frame-pointer-elim\"=\"true\"\n\t\"no-frame-pointer-elim-non-leaf\"\n\t\"no-infs-fp-math\"=\"false\"\n\t\"no-jump-tables\"=\"false\"\n\t\"no-nans-fp-math\"=\"false\"\n\t\"no-signed-zeros-fp-math\"=\"false\"\n\t\"no-trapping-math\"=\"false\"\n\t\"stack-protector-buffer-size\"=\"8\"\n\t\"target-cpu\"=\"generic\"\n\t\"target-features\"=\"+armv7-a,+dsp,+neon,+vfp3,-thumb-mode\"\n\t\"unsafe-fp-math\"=\"false\"\n\t\"use-soft-float\"=\"false\"\n}\n\"\"\"\n\nSYSTEM_LINUX_ARM_64 = \"\"\"\n; The standard Clang configuration for Linux ARM64 machines. DO NOT EDIT UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING!\n\ntarget datalayout = \"e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128\"\ntarget triple = \"aarch64-unknown-linux-gnu\"\n\nattributes #0 = {\n\tnounwind\n\t\"correctly-rounded-divide-sqrt-fp-math\"=\"false\"\n\t\"disable-tail-calls\"=\"false\"\n\t\"less-precise-fpmad\"=\"false\"\n\t\"min-legal-vector-width\"=\"0\"\n\t\"no-frame-pointer-elim\"=\"true\"\n\t\"no-frame-pointer-elim-non-leaf\"\n\t\"no-infs-fp-math\"=\"false\"\n\t\"no-jump-tables\"=\"false\"\n\t\"no-nans-fp-math\"=\"false\"\n\t\"no-signed-zeros-fp-math\"=\"false\"\n\t\"no-trapping-math\"=\"false\"\n\t\"stack-protector-buffer-size\"=\"8\"\n\t\"target-cpu\"=\"generic\"\n\t\"target-features\"=\"+neon\"\n\t\"unsafe-fp-math\"=\"false\"\n\t\"use-soft-float\"=\"false\"\n}\n\"\"\"\n\nSYSTEM_LINUX_X86_64 = \"\"\"\n; The standard Clang configuration for Linux x86-64 machines. DO NOT EDIT UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING!\n\ntarget datalayout = \"e-m:e-i64:64-f80:128-n8:16:32:64-S128\"\ntarget triple = \"x86_64-pc-linux-gnu\"\n\nattributes #0 = {\n\tnounwind\n\t\"correctly-rounded-divide-sqrt-fp-math\"=\"false\"\n\t\"disable-tail-calls\"=\"false\"\n\t\"less-precise-fpmad\"=\"false\"\n\t\"min-legal-vector-width\"=\"0\"\n\t\"no-frame-pointer-elim\"=\"false\"\n\t\"no-infs-fp-math\"=\"false\"\n\t\"no-jump-tables\"=\"false\"\n\t\"no-nans-fp-math\"=\"false\"\n\t\"no-signed-zeros-fp-math\"=\"false\"\n\t\"no-trapping-math\"=\"false\"\n\t\"stack-protector-buffer-size\"=\"8\"\n\t\"target-cpu\"=\"x86-64\"\n\t\"target-features\"=\"+fxsr,+mmx,+sse,+sse2,+x87\"\n\t\"unsafe-fp-math\"=\"false\"\n\t\"use-soft-float\"=\"false\"\n}\n\"\"\"\n\nSYSTEM_WINDOWS_X86_64 = \"\"\"\n; The standard Clang configuration for Windows x86-64 machines. 
DO NOT EDIT UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING!\n\ntarget datalayout = \"e-m:w-i64:64-f80:128-n8:16:32:64-S128\"\ntarget triple = \"x86_64-pc-windows-msvc19.11.0\"\n\nattributes #0 =\n{\n\tnounwind\n\t\"correctly-rounded-divide-sqrt-fp-math\"=\"false\"\n\t\"disable-tail-calls\"=\"false\"\n\t\"less-precise-fpmad\"=\"false\"\n\t\"no-frame-pointer-elim\"=\"false\"\n\t\"no-infs-fp-math\"=\"false\"\n\t\"no-jump-tables\"=\"false\"\n\t\"no-nans-fp-math\"=\"false\"\n\t\"no-signed-zeros-fp-math\"=\"false\"\n\t\"no-trapping-math\"=\"false\"\n\t\"stack-protector-buffer-size\"=\"8\"\n\t\"target-cpu\"=\"x86-64\"\n\t\"target-features\"=\"+fxsr,+mmx,+sse,+sse2,+x87\"\n\t\"unsafe-fp-math\"=\"false\"\n\t\"use-soft-float\"=\"false\"\n}\n\nattributes #1 =\n{\n\tnofree\n\tnounwind\n\t\"correctly-rounded-divide-sqrt-fp-math\"=\"false\"\n\t\"disable-tail-calls\"=\"false\"\n\t\"less-precise-fpmad\"=\"false\"\n\t\"no-frame-pointer-elim\"=\"false\"\n\t\"no-infs-fp-math\"=\"false\"\n\t\"no-nans-fp-math\"=\"false\"\n\t\"no-signed-zeros-fp-math\"=\"false\"\n\t\"no-trapping-math\"=\"false\"\n\t\"stack-protector-buffer-size\"=\"8\"\n\t\"target-cpu\"=\"x86-64\"\n\t\"target-features\"=\"+cx8,+fxsr,+mmx,+sse,+sse2,+x87\"\n\t\"unsafe-fp-math\"=\"false\" \"use-soft-float\"=\"false\"\n}\n\"\"\"\n\n\n# TODO: Use alloca/store/load instead, also for exception instance pointers\n\n# The LLVM v8+ IR back-end, which writes LLVM v8+ IR to the specified stream so that it can later be compiled with Clang.\nclass LlvmWriter(Visitor):\n\n\tdef __init__(self, setup, writer) -> None:\n\t\t# Save the global configuration as we need it from time to time.\n\t\tself.__setup = setup\n\t\tself.__writer = writer\n\n\t\tself.__printer = Printer()\n\n\t\tself.__reset()\n\n\t\tself.__relational_opcodes = {\n\t\t\tOperatorKind.Relational_GreaterEqual : \"uge\",\n\t\t\tOperatorKind.Relational_GreaterThan : \"ugt\",\n\t\t\tOperatorKind.Relational_LessThan : \"ult\",\n\t\t\tOperatorKind.Relational_LessEqual : \"ule\",\n\t\t\tOperatorKind.Relational_Equality : \"eq\",\n\t\t\tOperatorKind.Relational_Difference : \"ne\",\n\t\t}\n\t\tself.__auxiliary_opcodes = {\n\t\t\tOperatorKind.Logical_And : \"and\",\n\t\t\tOperatorKind.Logical_Or : \"or\",\n\t\t\tOperatorKind.Logical_Xor : \"xor\",\n\t\t\tOperatorKind.Algebraic_Addition : \"add\",\n\t\t\tOperatorKind.Algebraic_Subtraction : \"sub\",\n\t\t\tOperatorKind.Algebraic_Multiplication : \"mul\",\n\t\t\tOperatorKind.Algebraic_Division : \"udiv\",\n\t\t\tOperatorKind.Algebraic_Remainder : \"urem\",\n\t\t}\n\t\t# TODO: It would be better to transform the AST so that 'let x y= z' becomes 'let x := x y z' for the cases below.\n\t\tself.__assignment_opcodes = {\n\t\t\tAssignmentKind.Addition : \"add\",\n\t\t\tAssignmentKind.Division : \"udiv\",\n\t\t\t# NOTE: 'AssignmentKind.Identity' is special-cased in 'visitLetStatement' below.\n\t\t\tAssignmentKind.Multiplication : \"mul\",\n\t\t\t# NOTE: 'AssignmentKind.Optional' is special-cased in 'visitLetStatement' below.\n\t\t\tAssignmentKind.Remainder : \"urem\",\n\t\t\tAssignmentKind.Subtraction : \"sub\",\n\t\t}\n\n\t\tself.__systems : Dict[str, str] = {\n\t\t\t'linux-arm-32' : SYSTEM_LINUX_ARM_32.strip(),\n\t\t\t'linux-arm-64' : SYSTEM_LINUX_ARM_64.strip(),\n\t\t\t'linux-x86-64' : SYSTEM_LINUX_X86_64.strip(),\n\t\t\t'windows-x86-64' : SYSTEM_WINDOWS_X86_64.strip(),\n\t\t}\n\n\tdef __print(self, node : Node) -> None:\n\t\tself.__writer.flush(\";; \" + self.__printer.visit(node))
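# Illustrative sketch (not part of the compiler): allocate_block and allocate_value below
# both rely on a per-name counter, so the first request for a name is returned verbatim
# and later requests get a numeric suffix joined with '-'.  The same scheme in isolation
# (hypothetical names):
def make_unique(counters, name):
	index = counters.get(name)
	if not index:
		counters[name] = 1
		return name
	counters[name] += 1
	return name + "-" + str(index)

counters = {}
assert make_unique(counters, "result") == "result"
assert make_unique(counters, "result") == "result-1"
assert make_unique(counters, "result") == "result-2"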
def __reset(self) -> None:\n\t\t# Reset the block counters.\n\t\tself.__blocks : Dict[str, int] = {}\n\n\t\t# Reset the temporary locals counters.\n\t\tself.__locals : Dict[str, int] = {}\n\n\t\tself.__label : Optional[str] = None\n\n\t\tself.__exception : Optional[ThrowStatement] = None\n\n\tdef __format_global(self, label : str) -> str:\n\t\treturn \"@\" + label\n\n\tdef __format_label_definition(self, label : str) -> str:\n\t\treturn label + \":\"\n\n\tdef __format_label_reference(self, label : str) -> str:\n\t\treturn \"%\" + label\n\n\tdef __format_local(self, label : str) -> str:\n\t\treturn \"%\" + label\n\n\tdef __format_pointer(self, type : str) -> str:\n\t\treturn type + \"*\"\n\n\tdef __format_type(self, type : str) -> str:\n\t\treturn type\n\n\tdef __format_type_definition(self, type : str) -> str:\n\t\treturn \"%\" + type\n\n\t# Allocates a block index and returns it combined with the specified base name.\n\tdef allocate_block(self, name : str) -> str:\n\t\tindex = self.__blocks.get(name)\n\t\tif not index:\n\t\t\tself.__blocks[name] = 1\n\t\t\treturn name\n\n\t\tself.__blocks[name] += 1\n\t\treturn name + '-' + str(index)\n\n\t# Allocates a temporary and returns a reference to the new local.\n\tdef allocate_value(self, name : str = \"$t\") -> str:\n\t\tindex = self.__locals.get(name)\n\t\tif not index:\n\t\t\tself.__locals[name] = 1\n\t\t\treturn self.__format_local(name)\n\n\t\tself.__locals[name] += 1\n\t\treturn self.__format_local(name + '-' + str(index))\n\n\t@property\n\tdef current_block_label(self) -> str:\n\t\tassert(self.__label is not None)\n\t\treturn self.__label\n\n\t@current_block_label.setter\n\tdef current_block_label(self, value : str) -> None:\n\t\tself.__label = value\n\n\tdef enterBlock(self, label : str) -> None:\n\t\tself.__writer.undent(self.__format_label_definition(label))\n\t\tself.current_block_label = label\n\n\t# Writes a list of parameters to the class-wide output stream.\n\tdef writeParameters(self, that : Routine, result_tuple : Any) -> None:\n\t\tself.__writer.write(\"(\")\n\n\t\tfor parameter in that.parameters:\n\t\t\tif parameter != that.parameters[0]:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tparameter.visit(self, None)\n\n\t\tif result_tuple[0].type_kind != TypeKind.Void:\n\t\t\tif len(that.parameters) > 0:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tself.__writer.write(\"%s %s\" % ( self.__format_pointer(result_tuple[1]), self.allocate_value(\"$result\") ))\n\n\t\tself.__writer.write(\")\")\n\n\t# Returns a temporary which holds the result of the binary expression.\n\tdef visitBinaryExpression(self, that : BinaryExpression, argument : Any) -> str:\n\t\tfirst = that.first.visit(self, argument)\n\t\tother = that.other.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\ttype = that.type.visit(self, argument)\n\n\t\tresult = self.allocate_value('binary')\n\t\topcode = self.__auxiliary_opcodes.get(that.operator_kind)\n\t\tif opcode:\n\t\t\tself.__writer.flush(\"%s = %s %s %s, %s\" % ( result, opcode, type, first, other ))\n\t\t\treturn result\n\n\t\tself.__writer.flush(\n\t\t\t\"%s = icmp %s %s %s, %s\" % ( result, self.__relational_opcodes[that.operator_kind], type, first, other )\n\t\t)\n\t\treturn result\n\n\t# Returns a boolean literal expression.\n\tdef visitBooleanLiteral(self, that : BooleanLiteral, argument : Any) -> str:\n\t\treturn { False : 'false', True : 'true' }[that.value]\n\n\t# Returns a boolean type.\n\tdef visitBooleanType(self, that : BooleanType, argument : Any) -> str:\n\t\treturn \"i%u\" % that.width\n\n\t# Calls the specified function and returns the result in a suitable temporary.\n\tdef 
visitCallExpression(self, that : CallExpression, argument : Any) -> Optional[str]:\n\t\t# Create temporaries, if needed, for all arguments.\n\t\targuments = []\n\t\tfor argument1 in that.arguments:\n\t\t\targuments.append(argument1.visit(self, argument))\n\n\t\t# Change the return value into an 'out' parameter.\n\t\tassert(that.type is not None)\n\t\tif that.type.type_kind != TypeKind.Void:\n\t\t\tresult_type = that.type.visit(self, argument)\n\t\t\tresult_temp = self.allocate_value('result')\n\t\t\tself.__writer.flush(\"; Allocate memory for return value.\")\n\t\t\tself.__writer.flush(\"%s = alloca %s\" % ( result_temp, result_type ))\n\t\telse:\n\t\t\tresult_type = None\n\n\t\tstatus = self.allocate_value(\"$status\")\n\t\tassert(that.type is not None)\n\t\ttype = that.type.visit(self, argument)\n\t\tself.__writer.write(\"%s = call %%Exception* %s(\" % ( status, self.__format_global(that.name.text) ))\n\t\tfor index in range(len(arguments)):\n\t\t\tthat_argument = that.arguments[index]\n\n\t\t\tif index > 0:\n\t\t\t\tself.__writer.write(\", \")\n\n\t\t\tassert(that_argument.type is not None)\n\t\t\tself.__writer.write(\"%s %s\" % ( that_argument.type.visit(self, argument), arguments[index] ))\n\n\t\t# Output synthetic routine result argument.\n\t\tif result_type:\n\t\t\tif len(arguments) > 0:\n\t\t\t\tself.__writer.write(\", \")\n\t\t\tself.__writer.write(\"%s %s\" % ( self.__format_pointer(result_type), result_temp ))\n\n\t\tself.__writer.flush(\") #0\")\n\n\t\t# Store exception value in the already allocated local variable.\n\t\tself.__writer.flush(\"store %%Exception* %s, %%Exception** %s\" % ( status, self.__format_local(\"$exception\") ))\n\n\t\t# Output exception propagation code.\n\t\tself.__writer.flush(\"; Exception-propagation code.\")\n\t\tsuccess = self.allocate_value(\"$success\")\n\t\tself.__writer.flush(\"%s = icmp eq %%Exception* %s, null\" % ( success, status ))\n\t\tlabel = self.allocate_block('return')\n\t\tself.__writer.flush(\"br i1 %s, label %s, label %s\" % ( success, self.__format_label_reference(label), self.__format_label_reference(\"epilogue\") ))\n\t\tself.__writer.flush()\n\n\t\t# Output load of routine return value.\n\t\tself.enterBlock(label)\n\t\tactive : Optional[str] = None\n\t\tif result_type:\n\t\t\tself.__writer.flush(\"; Return the result\")\n\t\t\tactive = self.allocate_value(\"result\")\n\t\t\tself.__writer.flush(\"%s = load %s, %s %s\" % ( active, result_type, self.__format_pointer(result_type), result_temp ))\n\n\t\treturn active\n\n\tdef visitCallStatement(self, that : CallStatement, argument : Any) -> None:\n\t\tself.__print(that)\n\n\t\t# Create temporaries, if needed, for all arguments.\n\t\targuments = []\n\t\tfor argument1 in that.arguments:\n\t\t\targuments.append(argument1.visit(self, argument))\n\n\t\tstatus = self.allocate_value(\"$status\")\n\t\tself.__writer.write(\"%s = call %%Exception* %s(\" % ( status, self.__format_global(that.name.text) ))\n\t\tfor index in range(len(arguments)):\n\t\t\tthat_argument = that.arguments[index]\n\n\t\t\tif index > 0:\n\t\t\t\tself.__writer.write(\", \")\n\n\t\t\tassert(that_argument.type is not None)\n\t\t\tself.__writer.write(\"%s %s\" % ( that_argument.type.visit(self, argument), arguments[index] ))\n\n\t\tself.__writer.flush(\") #0\")\n\n\t\t# Store exception value in the already allocated local variable.\n\t\tself.__writer.flush(\"store %%Exception* %s, %%Exception** %s\" % ( status, self.__format_local(\"$exception\") ))\n\n\t\t# Output exception propagation code.\n\t\tself.__writer.flush(\"; 
Exception-propagation code.\")\n\t\tsuccess = self.allocate_value(\"$success\")\n\t\tself.__writer.flush(\"%s = icmp eq %%Exception* %s, null\" % ( success, status ))\n\t\tlabel = self.allocate_block('block')\n\t\tself.__writer.flush(\"br i1 %s, label %s, label %%epilogue\" % ( success, self.__format_label_reference(label) ))\n\t\tself.__writer.flush()\n\n\t\tself.enterBlock(label)\n\n\t# Returns a cardinal literal.\n\tdef visitCardinalLiteral(self, that : CardinalLiteral, argument : Any) -> str:\n\t\treturn str(that.value)\n\n\t# Returns a cardinal type wrapped in an LlvmType node.\n\tdef visitCardinalType(self, that : CardinalType, argument : Any) -> str:\n\t\treturn self.__format_type(\"i%u\" % that.width)\n\n\tdef visitClassDefinition(self, that : ClassDefinition, argument : Any) -> None:\n\t\t# TODO: Finish up LlvmWriter.visitClassDefinition.\n\t\tself.__print(that)\n\t\tself.__writer.flush(\"%s = type\" % self.__format_type_definition(that.name.text))\n\t\tself.__writer.indent(\"{\")\n\t\tfor below in that.below:\n\t\t\tif not isinstance(below, FieldDefinition):\n\t\t\t\tcontinue\n\n\t\t\tassert(below.type is not None)\n\t\t\tself.__writer.flush(\"%s ; %s\" % ( below.type.visit(self, argument), below.name.text ))\n\t\tself.__writer.dedent(\"}\")\n\t\tself.__writer.flush()\n\n\tdef visitConstructorExpression(self, that : ConstructorExpression, argument : Any) -> str:\n\t\tstatus = self.allocate_value(\"status\")\n\t\t# TODO: Fix hardcoded allocation request of 128 bytes...\n\t\tresult = self.allocate_value(\"result\")\n\t\tself.__writer.flush(\"%s = alloca i8*\" % result)\n\t\tself.__writer.flush(\"%s = call %%Exception* @$b0_memory_acquire(i%u 128, i8** %s) #0\" % ( status, BITS_PER_WORD, result ))\n\t\tsuccess_value = self.allocate_value(\"$success\")\n\t\tself.__writer.flush(\"%s = icmp eq %%Exception* %s, null\" % ( success_value, status ))\n\n\t\tsuccess_label = self.allocate_block(\"success\")\n\t\tfailure_label = self.allocate_block(\"failure\")\n\t\tself.__writer.flush(\"br i1 %s, label %s, label %s\" % (\n\t\t\t\tsuccess_value, self.__format_label_reference(success_label), self.__format_label_reference(failure_label)\n\t\t\t)\n\t\t)\n\t\tself.__writer.flush()\n\n\t\tself.enterBlock(success_label)\n\t\tstatus1 = self.allocate_value(\"status\")\n\t\tresult1 = self.allocate_value(\"result\")\n\t\tself.__writer.flush(\"%s = load i8*, i8** %s\" % ( result1, result ))\n\t\tself.__writer.flush(\"%s = bitcast i8* %s to %%Exception*\" % ( status1, result1 ))\n\t\t#self.__writer.flush(\"store %%Exception* %s, %%Exception** %s\" % ( status1, self.__format_local(\"$exception\") ))\n\t\tthrow_end = self.allocate_block(\"throw_end\")\n\t\tself.__writer.flush(\"br label %s\" % self.__format_label_reference(throw_end))\n\t\tself.__writer.flush()\n\n\t\tself.enterBlock(failure_label)\n\t\tself.__writer.flush(\"ret %%Exception* %s\" % status)\n\t\tself.__writer.flush()\n\n\t\tself.enterBlock(throw_end)\n\n\t\treturn status1\n\n\t# Outputs LLVM IR code for a Braceless 'create' statement (i.e. 
an 'alloca' slot for the local plus a 'store' of its initial value).\n\tdef visitCreateStatement(self, that : CreateStatement, argument : Any) -> None:\n\t\tself.__print(that)\n\n\t\tname = that.name.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\ttype = that.type.visit(self, argument)\n\t\tresult = that.expression.visit(self, argument)\n\n\t\t# Create local variable using LLVM IR 'alloca' and store the result in it.\n\t\tself.__writer.flush(\"%s = alloca %s\" % ( self.__format_local(name), type ))\n\t\tself.__writer.flush(\"store %s %s, %s* %s\" % ( type, result, type, self.__format_local(name) ))\n\n\tdef visitElifStatement(self, that : ElifStatement, argument : Any) -> str:\n\t\treturn self.visitIfStatement(that, argument)\n\n\tdef visitElseStatement(self, that : ElseStatement, argument : Any) -> None:\n\t\tself.__print(that)\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\t\tself.__writer.flush(\"br label %s\" % self.__format_label_reference(argument))\n\t\tself.__writer.flush()\n\n\t\tself.enterBlock(argument)\n\n\t# Outputs a complete 'if'/'elif'/'else' compound statement.\n\tdef visitIfElifElseStatement(self, that : IfElifElseStatement, argument : Any) -> None:\n\t\t# Allocate a label to be put AFTER the end of the 'if'/'elif'/'else' statement.\n\t\tafter_label = self.allocate_block('if_tail')\n\n\t\t# Output 'if' block (always present).\n\t\tblock_end_label = that.below[0].visit(self, after_label)\n\n\t\t# Output 'elif' and 'else' blocks, if any.\n\t\tfor below in that.below[1:]:\n\t\t\tself.enterBlock(block_end_label)\n\t\t\tblock_end_label = below.visit(self, after_label)\n\n\t# Outputs the 'if'/'elif' parts of a compound 'if'/'elif'/'else' statement.\n\tdef visitIfStatement(self, that : IfStatement, argument : Any) -> str:\n\t\tself.__print(that)\n\n\t\tcondition = that.condition.visit(self, None)\n\t\tif_body_label = self.allocate_block('if_body')\n\t\telif_label = self.allocate_block('elif')\n\t\tself.__writer.flush(\n\t\t\t\"br i1 %s, label %s, label %s\" % (\n\t\t\t\tcondition,\n\t\t\t\tself.__format_label_reference(if_body_label),\n\t\t\t\tself.__format_label_reference(elif_label)\n\t\t\t)\n\t\t)\n\t\tself.__writer.flush()\n\n\t\tself.enterBlock(if_body_label)\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__writer.flush(\"br label %s\" % self.__format_label_reference(argument))\n\t\tself.__writer.flush()\n\n\t\treturn elif_label
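# Illustrative sketch (not part of the compiler): visitIfElifElseStatement above threads
# one shared 'if_tail' label through every part; each 'if'/'elif' part branches either
# into its body or on to the next test block, and every body ends by branching to the
# shared tail, as visitIfStatement above shows.  A standalone rendering of that chain
# with hypothetical label names:
def lower_chain(conditions):
	lines = []
	for index, condition in enumerate(conditions):
		body_label = "if_body.%u" % index
		fail_label = "elif.%u" % index
		lines.append("br i1 %%%s, label %%%s, label %%%s" % ( condition, body_label, fail_label ))
		lines.append("%s:" % body_label)
		lines.append("br label %if_tail")
		lines.append("%s:" % fail_label)
	lines.append("br label %if_tail")	# The optional 'else' body would sit here.
	lines.append("if_tail:")
	return lines

assert lower_chain([ "c" ])[0] == "br i1 %c, label %if_body.0, label %elif.0"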
# Outputs a single 'let' statement.\n\tdef visitLetStatement(self, that : LetStatement, argument : Any) -> None:\n\t\tself.__print(that)\n\n\t\texpression = that.expression.visit(self, argument)\n\t\tassert(that.expression.type is not None)\n\t\t# TODO: Create Symbol hierarchy and let it handle formatting of types.\n\t\ttype = that.expression.type.visit(self, False)\n\n\t\tresult = self.allocate_value('assignment')\n\t\topcode = that.assignment_kind\n\t\tif opcode == AssignmentKind.Identity:\n\t\t\t# TODO: Avoid adding zero to the expression just to assign it to an LLVM temporary, use the expression directly.\n\t\t\tself.__writer.flush(\"%s = add %s %s, 0\" % ( result, type, expression))\n\t\t\tassert(isinstance(that.name, Name))\n\t\telif opcode == AssignmentKind.Optional:\n\t\t\t# TODO: Generate a conditional assignment of the form: if X == none: X := Y, where X is the lhs and Y is the rhs.\n\t\t\t# TODO: Finish up ConditionalAssignment code generation.\n\t\t\tassert(isinstance(that.name, Name))\n\t\t\tself.__writer.flush(\"%s = icmp eq %s %s, null\" % ( result, type, self.__format_local(that.name.text) ))\n\t\t\tself.__writer.flush(\"br \")\n\t\t\traise InternalError(\"Not implemented yet\")\n\t\telse:\n\t\t\tassert(isinstance(that.name, Name))\n\t\t\toperand = that.name.visit(self, argument)\n\t\t\tself.__writer.flush(\"%s = %s %s %s, %s\" % ( result, self.__assignment_opcodes[opcode], type, operand, expression ))\n\n\t# Outputs 'module' statements.\n\tdef visitModuleDefinition(self, that : ModuleDefinition, argument : Any) -> None:\n\t\tassert(argument is None)\n\n\t\tself.__writer.flush(\";; module %s:\" % that.name.text)\n\t\tself.__writer.flush('source_filename = \"%s\"' % os.path.split(that.cursor.file)[1])\n\t\tself.__writer.flush()\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\t# Returns a name ready to be output.\n\tdef visitName(self, that : Name, argument : Any) -> str:\n\t\tif that.namekind in [ NameKind.Entry, NameKind.Function ]:\n\t\t\treturn self.__format_global(that.text)\n\n\t\tif that.namekind == NameKind.Parameter:\n\t\t\treturn self.__format_local(that.text)\n\n\t\tassert(that.namekind == NameKind.Local)\n\t\treturn that.text\n\n\t# TODO: This method seems bogus? Is it needed or just incorrect? Why does a statement suddenly return a type?\n\tdef visitOperatorStatement(self, that : OperatorStatement, argument : Any) -> str:\n\t\tself.__print(that)\n\n\t\tassert(that.type is not None)\n\t\treturn that.type.visit(self, argument)\n\n\t# Outputs a single parameter.\n\tdef visitParameter(self, that : Parameter, argument : Any) -> None:\n\t\tassert(that.type is not None)\n\t\ttype = that.type.visit(self, argument)\n\t\tself.__writer.write(str(type) + \" \")\n\t\tname = that.name.visit(self, argument)\n\t\tself.__writer.write(name)\n\n\t# Outputs the global header to the generated LLVM IR file.\n\tdef visitProgramDefinition(self, that : ProgramDefinition, argument : Any) -> None:\n\t\tself.__writer.flush(\"; Generator: %s\" % banner_message())\n\t\tself.__writer.flush(\"; PLEASE DO NOT EDIT THIS FILE! 
REPORT THE ISSUE INSTEAD!\")\n\t\tself.__writer.flush()\n\n\t\t# Dynamically load the platform-specific LLVM header.\n\t\tsystem = self.__setup['target.triple']\n\t\tassert(isinstance(system, Target))\n\t\tsystem_data = self.__systems.get(system.triple)\n\t\tif not system_data:\n\t\t\traise InternalError(\"Unknown host machine and system triple: \" + system.triple)\n\t\tself.__writer.flush(system_data)\n\t\tdel system_data\n\t\tdel system\n\n\t\t# Separate header from actual code by a blank line.\n\t\tself.__writer.flush()\n\n\t\t# Write run-time library definitions.\n\t\tself.__writer.flush(\"; Run-time Library Definitions\")\n\t\t#self.__writer.flush(\"declare void @_exit(i32) noreturn\")\n\t\tself.__writer.flush(\"declare i8* @malloc(i%u) #1\" % BITS_PER_WORD)\n\t\t#self.__writer.flush(\"define %%Exception* @__linux_x86_64_mmap(i8*, i64, i32, i32, i32, i64) #0\")\n\t\t#self.__writer.indent(\"{\")\n\t\t#self.__writer.flush('%result = call i8* asm sideeffect \"syscall\", \"=r,{rax},{rdi},)\n\t\t#self.__writer.flush(\"declare void @_exit(i32) #1\")\n\t\tself.__writer.flush()\n\n\t\tself.__writer.flush(\"define %%Exception* @$b0_memory_acquire(i%u %%bytes, i8** %%$result) #0\" % BITS_PER_WORD)\n\t\tself.__writer.indent(\"{\")\n\t\tself.enterBlock(\"prologue\")\n\t\tmemory = self.allocate_value(\"memory\")\n\t\tself.__writer.flush(\"%s = call i8* @malloc(i%u %%bytes) #1\" % ( memory, BITS_PER_WORD ))\n\t\tself.__writer.flush(\"store i8* %s, i8** %s\" % ( memory, self.__format_local(\"$result\") ))\n\t\tstatus = self.allocate_value(\"$status\")\n\t\tself.__writer.flush(\"%s = icmp eq i8* %s, null\" % ( status, memory ))\n\t\tretval = self.allocate_value(\"$retval\")\n\t\tself.__writer.flush(\"%s = select i1 %s, %%Exception* @b0_Error_Memory_Create, %%Exception* null\" % ( retval, status ))\n\t\tself.__writer.flush(\"ret %%Exception* %s\" % retval)\n\t\tself.__writer.dedent(\"}\")\n\t\tself.__writer.flush()\n\n\t\t# Descend towards bottom of tree.\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\t\tself.__reset()\n\n\t\tself.__writer.flush(\"@b0_Error_Memory_Create = internal constant %%Exception { i%u 1 }, align 8\" % BITS_PER_WORD)\n\t\tself.__writer.flush(\"@b0_Error_Memory_Delete = internal constant %%Exception { i%u 2 }, align 8\" % BITS_PER_WORD)\n\t\tself.__writer.flush()\n\n\t\t# TODO: Determine THE ONE AND ONLY entry point if no explicit entry point has been specified using the '-entry' option.\n\t\tif not self.__setup['target.entry']:\n\t\t\traise SetupError(\"No entry point given using the -entry option\")\n\n\t\t# Output a custom main() function so that the program can be linked and executed.\n\t\tself.__writer.flush(\"; Generated main() function.\")\n\t\tself.__writer.flush(\"define dso_local i32 @main(i32 %argc, i8** %argv) #1\")\n\t\tself.__writer.indent(\"{\")\n\t\tself.enterBlock(\"prologue\")\n\n\t\t# Allocate temporary space for the return value.\n\t\tself.__writer.flush(\"; Allocate temporary for return value.\")\n\t\tresult = self.allocate_value('$result')\n\t\tself.__writer.flush(\"%s = alloca i1\" % result)\n\n\t\t# Invoke the entry point, which returns an exception instance (or none) and the actual return code.\n\t\tself.__writer.flush(\"; Call user-defined entry point.\")\n\t\tstatus = self.allocate_value(\"$status\")\n\t\tself.__writer.flush(\"%s = call %%Exception *@%s(i1* %s)\" % ( status, self.__setup['target.entry'], result ))\n\n\t\t# Handle the exception, if any.\n\t\tsuccess_value = self.allocate_value('$success')\n\t\tself.__writer.flush(\"%s 
= icmp eq %%Exception* %s, null\" % ( success_value, status ))\n\t\tsuccess_label = self.allocate_block(\"success\")\n\t\tself.__writer.flush(\"br i1 %s, label %s, label %%epilogue\" % ( success_value, self.__format_label_reference(success_label) ))\n\t\tself.__writer.flush()\n\n\t\t# Return 0 if 'result' is 1 and vice versa.\n\t\tself.enterBlock(success_label)\n\t\tself.__writer.flush(\"; Convert boolean into zero or one cardinal.\")\n\t\tlocal = self.allocate_value('$result')\n\t\tself.__writer.flush(\"%s = load i1, i1* %s\" % ( local, result ))\n\t\tfinal = self.allocate_value('$retval')\n\t\tself.__writer.flush(\"%s = select i1 %s, i32 0, i32 1\" % ( final, local ))\n\t\tself.__writer.flush(\"ret i32 %s\" % final)\n\t\tself.__writer.flush()\n\n\t\t# Return a special status code indicating that an unhandled exception was detected. (TODO: Add default catch handler).\n\t\tself.enterBlock(\"epilogue\")\n\t\tself.__writer.flush(\"ret i32 127\")\n\t\tself.__writer.dedent(\"}\")\n\t\tself.__writer.flush()\n\n\t# Returns a reference to a symbol wrapped.\n\tdef visitReference(self, that : Reference, argument : Any) -> str:\n\t\treturn self.__format_local(that.text)\n\n\t# Outputs the LLVM IR code for a Braceless0 'return' statement with or without a return expression.\n\tdef visitReturnStatement(self, that : ReturnStatement, argument : Any) -> None:\n\t\tself.__print(that)\n\n\t\tvalue = that.expression.visit(self, argument)\n\t\tassert(that.expression.type is not None)\n\t\ttype = that.expression.type.visit(self, argument)\n\n\t\tif that.expression.type_kind != TypeKind.Void:\n\t\t\tself.__writer.flush(\"store %s %s, %s %s\" % ( type, value, self.__format_pointer(type), self.__format_local(\"$result\") ))\n\t\tself.__writer.flush(\"br label %epilogue\")\n\t\tself.__writer.flush()\n\n\t# Outputs the LLVM IR code for a Braceless0 routine.\n\tdef visitRoutine(self, that : Routine, argument : Any) -> None:\n\t\tself.__print(that)\n\n\t\t# Reset routine-based LLVM temporaries index, etc.\n\t\tself.__reset()\n\n\t\t# Write routine prototype.\n\t\tself.__writer.write(\"define \")\n\t\tif MODE == \"TEST\":\n\t\t\tself.__writer.write(\"external \")\n\t\telse:\n\t\t\tself.__writer.write(\"private \")\n\t\tself.__writer.write(\"%Exception* \")\n\t\tname = that.name.visit(self, None)\n\t\tself.__writer.write(name)\n\t\tassert(that.type is not None)\n\t\ttype = that.type.visit(self, None)\n\t\tself.writeParameters(that, ( that.type, type ))\n\t\tself.__writer.flush(\" #0\")\n\n\t\t# Write routine basic blocks.\n\t\tself.__writer.indent(\"{\")\n\t\tself.enterBlock(\"prologue\")\n\t\tself.__writer.flush(\"; Allocate and initialize local variable holding exception value.\")\n\t\texception = self.allocate_value(\"$exception\")\n\t\tself.__writer.flush(\"%s = alloca %%Exception*\" % exception)\n\t\tself.__writer.flush(\"store %%Exception* null, %%Exception** %s\" % exception)\n\n\t\tfor below in that.below:\n\t\t\tbelow.visit(self, argument)\n\n\t\t# Write exception propagation code.\n\t\tself.enterBlock(\"epilogue\")\n\t\tretval = self.allocate_value(\"$retval\")\n\t\tself.__writer.flush(\"%s = load %%Exception*, %%Exception** %s\" % ( retval, exception ))\n\t\tself.__writer.flush(\"ret %%Exception* %s\" % retval)\n\t\tself.__writer.dedent('}')\n\t\tself.__writer.flush()\n\n\t# Returns a temporary which holds the result of the tertiary expression.\n\tdef visitTertiaryExpression(self, that : TertiaryExpression, argument : Any) -> str:\n\t\tfirst_data = that.first.visit(self, 
argument)\n\t\tassert(that.first.type is not None)\n\t\tfirst_type = that.first.type.visit(self, argument)\n\t\tother_data = that.other.visit(self, argument)\n\t\tassert(that.other.type is not None)\n\t\tother_type = that.other.type.visit(self, argument)\n\t\tthird_data = that.third.visit(self, argument)\n\t\tassert(that.third.type is not None)\n\t\tthird_type = that.third.type.visit(self, argument)\n\n\t\tthat.type = that.other.type\n\n\t\tresult = self.allocate_value('tertiary')\n\t\tself.__writer.flush(\n\t\t\t\"%s = select i1 %s, %s %s, %s %s\" % ( result, first_data, other_type, other_data, third_type, third_data )\n\t\t)\n\n\t\treturn result\n\n\tdef visitThrowStatement(self, that : ThrowStatement, argument : Any) -> None:\n\t\tthat.local = that.expression.visit(self, argument)\n\t\tself.__exception = that\n\n\t\t# Output code to raise exception.\n\t\tself.__writer.flush(\"ret %%Exception* %s\" % that.local)\n\t\tself.__writer.flush()\n\n\t# Returns a temporary which holds the result of the unary expression.\n\tdef visitUnaryExpression(self, that : UnaryExpression, argument : Any) -> str:\n\t\tfirst = that.first.visit(self, argument)\n\t\tassert(that.type is not None)\n\t\ttype = that.type.visit(self, argument)\n\n\t\tunary = self.allocate_value(\"unary\")\n\t\tself.__writer.flush(\"%s = icmp eq %s %s, 0\" % ( unary, type, first ))\n\n\t\treturn unary\n\n\t# Returns a void literal.\n\tdef visitVoidExpression(self, that : VoidExpression, argument : Any) -> str:\n\t\treturn \"void\"\n\n\t# Returns a void type.\n\tdef visitVoidType(self, that : VoidType, argument : Any) -> str:\n\t\treturn \"void\"\n\n\n#***************************************************** Invoker (External Tools Runner) *******************************************\n\n# Exception thrown on errors during an attempt to invoke an external command.\nclass InvokerError(Error):\n\n\tdef __init__(self, text) -> None:\n\t\tError.__init__(self, text)\n\n\n# Wrapper class that allows the client to locate and/or execute external commands.\nclass Invoker(object):\n\n\tdef execute(self, command, arguments, verbose=False, capture=False) -> Optional[str]:\n\t\t\"\"\"Invokes the specified command with the given arguments and throws an exception if the invocation fails.\"\"\"\n\n\t\tif verbose:\n\t\t\tprint(\"Invoking: %s\" % command, end='')\n\t\t\tfor argument in arguments:\n\t\t\t\tprint(\" %s\" % argument, end='')\n\t\t\tprint()\n\n\t\ttry:\n\t\t\tstatus = subprocess.run([ command ] + arguments, capture_output=True)\n\n\t\t\tif status.returncode:\n\t\t\t\tfor line in status.stderr.decode('utf8').split(os.linesep):\n\t\t\t\t\tprint(\"!! 
%s\" % line)\n\t\t\t\traise InvokerError(\"Error while invoking command: %s\" % command)\n\n\t\t\tif capture:\n\t\t\t\treturn status.stdout.decode('utf8')\n\n\t\t\treturn None\n\t\t# NOTE: subprocess.run() raises OSError (e.g. FileNotFoundError) when the command cannot be started at all.\n\t\texcept (OSError, subprocess.CalledProcessError):\n\t\t\traise InvokerError(\"Unable to invoke command: %s\" % command)\n\n\t# Attempts to locate the specified command and returns the path to it or 'None' if not found.\n\tdef locate(self, command) -> Optional[str]:\n\t\tpath = os.environ.get(\"PATH\")\n\t\tif not path:\n\t\t\traise InvokerError(\"Could not query PATH environment variable\")\n\n\t\t# Figure out if we need to check against a list of default extensions or not.\n\t\tpathext : List[str] = []\n\t\tif platform.system() == \"Windows\":\n\t\t\t# We need to check against all extensions registered in the PATHEXT environment variable.\n\t\t\tvalue = os.environ.get(\"PATHEXT\")\n\t\t\tassert(value is not None)\n\t\t\tpathext = value.lower().split(os.pathsep)\n\t\t\tdel value\n\n\t\t# ... Everywhere: We always need to check for verbatim matches.\n\t\tpathext += [ \"\" ]\n\n\t\tpaths = path.split(os.pathsep)\n\t\tfor path in paths:\n\t\t\tfor extension in pathext:\n\t\t\t\tattempt = path + os.sep + command + extension\n\t\t\t\tif os.path.isfile(attempt):\n\t\t\t\t\treturn attempt\n\n\t\t# We found nothing.\n\t\treturn None\n\n\n#************************************************** Target Triplet Handling ******************************************************\n\n# A Braceless-specific target triple (os, cpu, bits) used for explicitly stating the target platform's triple.\nclass Target(object):\n\n\tdef __init__(self, triple : str) -> None:\n\t\tif triple.lower() != triple:\n\t\t\traise SetupError('Target triple must be all lowercase')\n\n\t\t# Parse triple into its separate fields.\n\t\tparts = triple.split('-')\n\t\tif len(parts) != 3:\n\t\t\traise SetupError('Invalid target triple (os-cpu-bits): %s' % triple)\n\t\t[ os, cpu, bits ] = parts\n\t\twidth = int(bits)\n\n\t\tif not os in [ 'linux', 'windows' ]:\n\t\t\traise SetupError('Unknown target operating system: %s' % os)\n\t\tif not cpu in [ 'arm', 'x86' ]:\n\t\t\traise SetupError('Unknown target CPU: %s' % cpu)\n\t\tif not width in [ 32, 64 ]:\n\t\t\traise SetupError('Unknown target CPU word size: %u' % width)\n\n\t\t# Assign the instance data.\n\t\tself.__triple = triple\n\t\tself.__os = os\n\t\tself.__cpu = cpu\n\t\tself.__bits = width\n\n\t@property\n\tdef bits(self) -> int:\n\t\treturn self.__bits\n\n\t@property\n\tdef cpu(self) -> str:\n\t\treturn self.__cpu\n\n\t@property\n\tdef os(self) -> str:\n\t\treturn self.__os\n\n\t@property\n\tdef triple(self) -> str:\n\t\treturn self.__triple\n\n\n#************************************************** Command-Line Parameter Parser ************************************************\n\n\"\"\"The help text for the compiler, displayed if the compiler is invoked with the -help option.\"\"\"\nHELP = \"\"\"\nSyntax: \"b0c.py\" *option +source_file\n\nOptions:\n\n    -banner:[0|1]      Disable or enable program banner (default: Disabled).\n    -dump              Dump the AST to a log file ending in '.log' (default: Disabled).\n    -entry:name        Use the specified name as the program entry point (default: The one and only entry point).\n    -help|-h|--help    Display this help text and exit.\n    -license           Display the license and exit.\n    -output:folder     Specify the folder to write output file(s) to (default: Current directory).\n    -target:           Specify the target os, cpu, and word size using the format 'os-cpu-bits':\n                       os is one of 'linux', 'windows'.\n                       cpu is one of 'arm', 'x86'.\n                       
bits is one of '32', '64'.\n    -version           Display the program version and exit.\n    -writer:llvm       Generate an LLVM IR file (.ll).\n\nBraceless v0.x is a BOOTSTRAP compiler for the Braceless v1.x project and thus not intended for production use!\n\nBraceless v0.x initially targets LLVM v8+/AMD64 only as I have no use for other bootstrap compilers at this point in time.\n\nThe plan is to write Braceless v0.x in Python v3.7+ and then write the actual production compiler in Braceless v0.x.\n\nPlease notice that the use of prefix notation, instead of the traditional postfix notation, in the \"Syntax:\" line, is intentional\nand not an error. I just happen to think: \"Why place the important information at the back rather than at the front?\"\n\"\"\"\nHELP = HELP.strip()\n\n\"\"\"The license for the compiler, displayed if the compiler is invoked with the -license option.\"\"\"\nLICENSE = \"\"\"\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following\nconditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below.\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer below\n   in the documentation and/or other materials provided with the distribution.\n * Neither the name of Mikael Egevig nor the names of its contributors may be used to endorse or promote products derived\n   from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,\nBUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT\nSHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nLICENSE = LICENSE.strip()\n\n\n# Exception class used to report errors discovered while parsing command-line arguments.\nclass SetupError(Error):\n\n\tdef __init__(self, text) -> None:\n\t\tError.__init__(self, text)\n\n\n# Class used to parse arguments from the environment, the command-line, and from response files.\nclass Setup(object):\n\n\tdef __init__(self, values : Dict[str, Any] = {}) -> None:\n\t\tself.__values : Dict[str, Any] = {\n\t\t\t'source.inputs' : [],\n\t\t\t'system.banner' : False,\n\t\t\t'system.dumper' : None,\n\t\t\t'system.origin' : None,\n\t\t\t'target.entry' : None,\n\t\t\t'target.level' : 0,\n\t\t\t'target.output' : None,\n\t\t\t'target.prefix' : None,\n\t\t\t'target.suffix' : None,\n\t\t\t'target.triple' : None,\n\t\t\t'target.writer' : None,\n\t\t\t'target.width' : None,\n\t\t}\n\t\tself.__values.update(values)\n\n\tdef __getitem__(self, name : str) -> Any:\n\t\treturn self.__values[name]\n\n\tdef __setitem__(self, name : str, data : Any) -> None:\n\t\tif not name in self.__values:\n\t\t\traise SetupError('Cannot assign non-existing configuration item: ' + name)\n\n\t\tif self.__values[name]:\n\t\t\traise SetupError('Cannot reassign already assigned configuration item: ' + name)\n\n\t\tself.__values[name] = data\n\n\t# Checks the state of the settings given on the command-line etc. 
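(for example, it infers the target triple from the host platform when no '-target:' option was given) 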
and throws an exception if an error is detected.\n\tdef check(self) -> None:\n\t\tif not self.__values['source.inputs']:\n\t\t\traise SetupError(\"No source file(s) specified\")\n\n\t\t# Check the hidden 'target.prefix' option used only internally.\n\t\tif not self.__values['target.prefix']:\n\t\t\t# Use the base name of the first input file as the base name of all output files.\n\t\t\tbasename = self.__values['source.inputs'][0]\n\t\t\tbasename = os.path.split(basename)[1]\n\t\t\tbasename = os.path.splitext(basename)[0]\n\t\t\tself.__values['target.prefix'] = basename\n\n\t\t# Check the public 'target.output' option (the name of the directory where output is written to).\n\t\tif not self.__values['target.output']:\n\t\t\t# Use the current directory (avoid using './' as it clutters up log files and stuff).\n\t\t\tself.__values['target.output'] = ''\n\t\telse:\n\t\t\t# Ensure that the output directory ('target.output') is properly terminated by a folder separator.\n\t\t\tif self.__values['target.output'][-1] != os.sep:\n\t\t\t\tself.__values['target.output'] += os.sep\n\n\t\t\t# Check that the output directory exists and is a directory.\n\t\t\tif not os.path.isdir(self.__values['target.output']):\n\t\t\t\traise SetupError('Output directory does not exist: ' + self.__values['target.output'])\n\n\t\t# Convert 'True' value of 'system.dumper' into a full path to the output log file.\n\t\tif self.__values['system.dumper'] == True:\n\t\t\tself.__values['system.dumper'] = self.__values['target.output'] + self.__values['target.prefix'] + '.log'\n\n\t\t# Configure target system inferred from the host environment, if not specified with the '-target:os-cpu-bits' option.\n\t\tif not self.__values['target.triple']:\n\t\t\tmachine_mapper = {\n\t\t\t\t'aarch64' : ( 'arm', 64 ),\n\t\t\t\t'armv7l' : ( 'arm', 32 ),\n\t\t\t\t'x86' : ( 'x86', 32 ),\n\t\t\t\t'amd64' : ( 'x86', 64 ),\n\t\t\t\t'x86_64' : ( 'x86', 64 ),\n\t\t\t}\n\t\t\t( machine, width ) = machine_mapper[platform.machine().lower()]\n\t\t\tsystem = platform.system().lower()\n\t\t\tself.__values['target.triple'] = Target(\"%s-%s-%u\" % ( system, machine, width ))\n\n\t\t# Check that the '-entry:name' option has been given (currently mandatory, will be inferred later on).\n\t\tif not self.__values['target.entry']:\n\t\t\traise SetupError('Missing option: -entry:name')\n\n\t# Parses a single argument and updates the class' 'values' property with the parsed value.\n\tdef parseArgument(self, argument : str) -> None:\n\t\t# If an empty argument, reject it.\n\t\tif len(argument) == 0:\n\t\t\traise SetupError(\"Empty argument detected\")\n\n\t\t# If a named argument, parse it.\n\t\tif argument[0] == '-':\n\t\t\tcolon = argument.find(':')\n\t\t\tif colon == -1:\n\t\t\t\tname = argument[1:]\n\t\t\t\tdata = None\n\t\t\telse:\n\t\t\t\tname = argument[1:colon]\n\t\t\t\tdata = argument[colon + 1:].strip()\n\t\t\t\tif len(data) == 0:\n\t\t\t\t\tdata = None\n\n\t\t\tif name == \"banner\":\n\t\t\t\tif not data:\n\t\t\t\t\traise SetupError(\"Missing argument in option: \" + argument)\n\t\t\t\tself.__values['system.banner'] = { '0' : False, '1' : True }[data]\n\t\t\telif name == \"dump\":\n\t\t\t\tif data:\n\t\t\t\t\traise SetupError(\"Extraneous argument in option: \" + argument)\n\t\t\t\tself.__values['system.dumper'] = True\n\t\t\telif name == \"entry\":\n\t\t\t\tif not data:\n\t\t\t\t\traise SetupError(\"Missing argument in option: \" + argument)\n\t\t\t\tself.__values['target.entry'] = data\n\t\t\telif name in [ \"help\", \"h\", \"-help\" ]:\n\t\t\t\t# Always 
display the banner when showing the help text (for the sake of less user confusion).\n\t\t\t\tself.__values['system.banner'] = True\n\t\t\t\traise ShowTextAndExitProgram(HELP)\n\t\t\telif name == \"license\":\n\t\t\t\t# Always display the banner when showing the license (for the sake of less user confusion).\n\t\t\t\tself.__values['system.banner'] = True\n\t\t\t\traise ShowTextAndExitProgram(LICENSE)\n\t\t\telif name == \"optimize\":\n\t\t\t\tif not data:\n\t\t\t\t\traise SetupError(\"Missing argument in option: \" + argument)\n\t\t\t\ttry:\n\t\t\t\t\tvalue = int(data)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise SetupError(\"Invalid argument in option: \" + argument)\n\t\t\t\tif value < 0 or value > 3:\n\t\t\t\t\traise SetupError(\"Argument out of range in option: \" + argument)\n\t\t\t\tself.__values['target.level'] = value\n\t\t\telif name == \"output\":\n\t\t\t\tif not data:\n\t\t\t\t\traise SetupError(\"Missing argument in option: \" + argument)\n\t\t\t\tself.__values['target.output'] = data\n\t\t\telif name == 'target':\n\t\t\t\tif not data:\n\t\t\t\t\traise SetupError(\"Missing argument in option: \" + argument)\n\t\t\t\tself.__values['target.triple'] = Target(data)\n\t\t\telif name == \"version\":\n\t\t\t\t# Don't display the banner when showing version information (for the sake of scripts).\n\t\t\t\tself.__values['system.banner'] = False\n\t\t\t\traise ShowTextAndExitProgram(VERSION)\n\t\t\telif name == \"writer\":\n\t\t\t\tif not data:\n\t\t\t\t\traise SetupError(\"Missing argument in option: \" + argument)\n\t\t\t\tself.__values['target.writer'] = { 'llvm' : LlvmWriter }[data]\n\t\t\t\tself.__values['target.suffix'] = { 'llvm' : '.ll' }[data]\n\t\t\telse:\n\t\t\t\traise SetupError(\"Unknown option encountered: \" + argument)\n\n\t\t\treturn\n\n\t\t# If a positional argument, parse it.\n\t\tif os.path.splitext(argument)[1] != '.b0':\n\t\t\traise SetupError(\"Extension of source modules must be '.b0'\")\n\t\tself.__values['source.inputs'].append(argument)\n\n\t# Parses the \"origin\" aka the startup folder for the compiler.\n\tdef parseOrigin(self, value : str) -> None:\n\t\tif self.__values['system.origin'] is not None:\n\t\t\traise SetupError(\"Origin already specified: \" + value)\n\n\t\tabsolute = os.path.abspath(value)\n\t\t( path, name ) = os.path.split(absolute)\n\t\tself.__values['system.origin'] = path\n\n\t\t# Ensure the origin ends in a slash so that we don't need to specify it every time we use 'system.origin'.\n\t\tif self.__values['system.origin'][-1] != os.sep:\n\t\t\tself.__values['system.origin'] += os.sep\n\n\n#********************************************************* Program Entry Point ***************************************************\n\n# Returns the version and copyright information for this product.\ndef banner_message() -> str:\n\treturn \"Braceless v%s - Copyright (c) %s %s. All Rights Reserved. 
- %s\" % ( VERSION, YEARSTR, COMPANY, WEBSITE )\n\n\n# Manages the \"output state\" of the program banner so that it gets output at most once.\nclass Banner(object):\n\n\tdef __init__(self) -> None:\n\t\tself.__done = False\n\n\tdef print(self) -> None:\n\t\tif self.__done:\n\t\t\treturn\n\n\t\tprint(banner_message())\n\t\tprint()\n\n\t\tself.__done = True\n\n\n# The entry point for the compiler.\ndef main(arguments : List[str]) -> int:\n\t# Assume failure (simplifies the exception handlers below).\n\tstatus = 1\n\n\t# Prepare to display the banner (optional on success, mandatory on failure).\n\tbanner = Banner()\n\n\t# The main driver code.\n\ttry:\n\t\t# Parse command-line options.\n\t\tsetup = Setup()\n\t\tsetup.parseOrigin(arguments[0])\n\t\tfor argument in arguments[1:]:\n\t\t\tsetup.parseArgument(argument)\n\n\t\t# Check that the given and inferred setup is complete and valid.\n\t\tsetup.check()\n\n\t\t# Don't support Windows just yet as it is a truly cumbersome and tedious platform to support.\n\t\tif setup['target.triple'].os == 'windows':\n\t\t\traise SetupError('Windows targets are not yet supported')\n\n\t\t# Print the banner (copyright, website, and legal stuff).\n\t\tif setup['system.banner']:\n\t\t\tbanner.print()\n\n\t\t# Parse each input file in turn.\n\t\tmodules : List[Node] = []\n\t\tfor input in setup['source.inputs']:\n\t\t\tif not os.path.isfile(input):\n\t\t\t\traise SetupError(\"File not found: \" + input)\n\n\t\t\tmodules.append(Parser().parse(input))\n\n\t\t# Create global, outermost, topmost scope.\n\t\tprogram = ProgramDefinition(Cursor(\"\"), modules)\n\n\t\t# Assign a unique numerical id to each node in the abstract syntax tree.\n\t\tEnumerator().visit(program)\n\n\t\t# Check that all nodes, except the ProgramDefinition node, have a parent.\n\t\tOrphanChecker().visit(program)\n\n\t\t# Create global symbol table, which is used during all resolution passes.\n\t\tsymbols = Environment()\n\n\t\t# Identify all outer identifiers - insert new symbols into and look up references to symbols in the given symbol table.\n\t\tOuterResolver(symbols).visit(program)\n\n\t\t# Identify all inner identifiers - insert new symbols into and look up references to symbols in the given symbol table.\n\t\tInnerResolver(symbols).visit(program)\n\n\t\t# Dump the abstract syntax tree to the specified log file.\n\t\tif setup['system.dumper']:\n\t\t\tstream = TextWriter(setup['system.dumper'])\n\t\t\tDumper().visit(program, stream)\n\t\t\tstream.close()\n\n\t\t# Perform static checks (does not alter the AST).\n\t\tChecker().visit(program)\n\n\t\t# Determine file name WITHOUT extension of all output files (includes the proper output folder name).\n\t\tfilename = setup['target.output'] + setup['target.prefix']\n\n\t\t# Write the output using the specified writer (does not alter the AST).\n\t\tif setup['target.writer']:\n\t\t\tstream = TextWriter(filename + setup['target.suffix'])\n\t\t\tsetup['target.writer'](setup, stream).visit(program)\n\t\t\tstream.close()\n\n\t\tinvoker = Invoker()\n\t\tif False:\n\t\t\t# Invoke llvm-as to convert .ll file into .bc file.\n\t\t\tllvm_as = invoker.locate('llvm-as') or invoker.locate('llvm-as-9') or invoker.locate('llvm-as-8')\n\t\t\tif not llvm_as:\n\t\t\t\traise SetupError('llvm-as not found in path')\n\t\t\tinvoker.execute(llvm_as, [ '-o', filename + '.bc', filename + '.ll' ])\n\t\t\tdel llvm_as\n\n\t\t\t# Invoke llvm-opt to optimize the assembled output.\n\t\t\tllvm_opt = invoker.locate('opt') or invoker.locate('opt-9') or 
invoker.locate('opt-8')\n\t\t\tif not llvm_opt:\n\t\t\t\traise SetupError('llvm-opt not found in path')\n\t\t\tinvoker.execute(llvm_opt, [ '-O%u' % setup['target.level'], '--mem2reg', '-o', filename + '.bco', filename + '.bc', ])\n\t\t\tdel llvm_opt\n\n\t\t\t# Invoke llc to generate .o file from .bco file.\n\t\t\tllvm_llc = invoker.locate('llc') or invoker.locate('llc-9') or invoker.locate('llc-8')\n\t\t\tif not llvm_llc:\n\t\t\t\traise SetupError('llc not found in path')\n\t\t\tinvoker.execute(llvm_llc, [ '-filetype=obj', '-O=%u' % setup['target.level'], '-o', filename + '.o', filename + '.bco' ])\n\t\t\tdel llvm_llc\n\n\t\t\t# Invoke 'llvm-objdump' to generate .asm from .o file.\n\t\t\tllvm_objdump = invoker.locate('llvm-objdump') or invoker.locate('llvm-objdump-9') or invoker.locate('llvm-objdump-8')\n\t\t\tif not llvm_objdump:\n\t\t\t\traise SetupError('llvm-objdump not found in path')\n\t\t\tstdout = invoker.execute(llvm_objdump, [ '-disassemble', filename + '.o' ], capture=True)\n\t\t\tif stdout:\n\t\t\t\topen(filename + '.asm', 'wt').write(EOL.join(stdout.split(os.linesep)))\n\t\t\telif os.path.isfile(filename + '.asm'):\n\t\t\t\t# Remove a stale disassembly from an earlier run rather than fail on a missing file.\n\t\t\t\tos.unlink(filename + '.asm')\n\t\t\tdel llvm_objdump\n\n\t\t\t# Invoke ld to generate executable from .o file.\n\t\t\tllvm_lld = invoker.locate('ld.lld')\n\t\t\tif not llvm_lld:\n\t\t\t\traise SetupError('ld not found in path')\n\t\t\tinvoker.execute(llvm_lld, [ '-L', '/usr/lib', '-l', 'c', '-o', filename, filename + '.o' ])\n\t\t\tdel llvm_lld\n\t\telse:\n\t\t\t# For now use Clang: I simply can't get the code to run without Clang - I get a core dump no matter what I do...\n\t\t\tclang = invoker.locate('clang') or invoker.locate('clang-9') or invoker.locate('clang-8')\n\t\t\tif not clang:\n\t\t\t\traise SetupError('clang not found in path')\n\n\t\t\tinvoker.execute(clang, [ '-O%u' % setup['target.level'], '-o', filename, filename + '.ll' ])\n\t\t\tinvoker.execute(clang, [ '-c', '-O%u' % setup['target.level'], '-o', filename + '.o', filename + '.ll' ])\n\n\t\t\t# Invoke 'llvm-objdump' to generate .asm from .o file.\n\t\t\tllvm_objdump = invoker.locate('llvm-objdump') or invoker.locate('llvm-objdump-9') or invoker.locate('llvm-objdump-8')\n\t\t\tif not llvm_objdump:\n\t\t\t\traise SetupError('llvm-objdump not found in path')\n\t\t\tstdout = invoker.execute(llvm_objdump, [ '-disassemble', filename + '.o' ], capture=True)\n\t\t\tif stdout:\n\t\t\t\topen(filename + '.asm', 'wt').write(EOL.join(stdout.split(os.linesep)))\n\t\t\telif os.path.isfile(filename + '.asm'):\n\t\t\t\t# Remove a stale disassembly from an earlier run rather than fail on a missing file.\n\t\t\t\tos.unlink(filename + '.asm')\n\t\t\tdel llvm_objdump\n\n\t\tdel filename\n\t\tdel invoker\n\n\t\tstatus = 0\n\texcept ShowTextAndExitProgram as that:\n\t\tif setup['system.banner']:\n\t\t\tbanner.print()\n\t\tprint(that)\n\texcept Error as that:\n\t\tbanner.print()\n\t\tprint(that)\n\n\t\t# Print a full stack trace while developing the product.\n\t\tif MODE == \"TEST\":\n\t\t\traise\n\texcept Exception as that:\n\t\tbanner.print()\n\t\tprint(\"Unhandled Error: %s\" % that)\n\n\t\t# Print a full stack trace while developing the product.\n\t\tif MODE == \"TEST\":\n\t\t\traise\n\n\treturn status\n\n\n# Python magic to invoke the entry point routine (main) and return its result code to the run-time environment.\nif __name__ == \"__main__\":\n\tstatus = main(sys.argv)\n\tsys.exit(status)\n\n\n#************************************************************ Unit Tests *********************************************************\n\nimport unittest\n\n# The unit tests can be executed using the following command:\n#\n# Linux:\n#\n#    python3 -m unittest -q b0c\n#\n# 
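The '-q' flag is Python unittest's standard quiet mode; substituting the standard '-v' flag lists each test as it runs:\n#\n#    python3 -m unittest -v b0c\n#\n# 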
Windows:\n#\n#    python -m unittest -q b0c\n\n\n# Formats a string so that non-printable characters are nicely formatted and thus made printable.\ndef format_string(value : Optional[str]) -> str:\n\tif value is None:\n\t\treturn \"(none)\"\n\n\tresult = \"\"\n\tfor char in value:\n\t\tif char == SPC:\n\t\t\tresult += char\n\t\telse:\n\t\t\tresult += str(CharacterFormatter(char))\n\n\treturn result\n\n\n# Wrapper class used to invoke the lexer and verify that its output is as expected.\nclass LexerRunner(object):\n\n\t# Invoke the lexer to scan 'source' and compare its output with the specified list of errors and/or tokens.\n\tdef __init__(self, source : List[str], errors : List[LexerError], tokens : List[Token]) -> None:\n\t\t_errors : List[LexerError] = []\n\t\t_tokens : List[Token] = []\n\t\ttry:\n\t\t\t_tokens = Lexer().scan(\"\", EOL.join(source))\n\t\texcept LexerError as that:\n\t\t\t_errors.append(that)\n\n\t\ttry:\n\t\t\t# Check that we got the expected number of errors.\n\t\t\tif len(_errors) != len(errors):\n\t\t\t\traise Error(\"Error count mismatch: Found %u, expected %u\" % ( len(_errors), len(errors)))\n\n\t\t\t# Check that each error is as expected.\n\t\t\tfor index in range(len(errors)):\n\t\t\t\tfirst_error = errors[index]\n\t\t\t\tother_error = _errors[index]\n\n\t\t\t\t#if first.kind != other.kind:\n\t\t\t\t#\traise Error(\"Error[%u]: Kind mismatch\" % index)\n\t\t\t\tif first_error.text != other_error.text:\n\t\t\t\t\traise Error(\"Error[%u]: Text mismatch\" % index)\n\n\t\t\t# Check that we got the expected number of tokens.\n\t\t\tif len(_tokens) != len(tokens):\n\t\t\t\traise Error(\"Token count mismatch: Found %u, expected %u\" % ( len(_tokens), len(tokens)))\n\n\t\t\t# Check that each token is as expected.\n\t\t\tfor index in range(len(tokens)):\n\t\t\t\tfirst_token = tokens[index]\n\t\t\t\tother_token = _tokens[index]\n\n\t\t\t\tformat = \"Token[%u]: %s mismatch; expected '%s' but found '%s'\"\n\n\t\t\t\t# Compare file names and fail if different.\n\t\t\t\tif first_token.cursor.file != other_token.cursor.file:\n\t\t\t\t\traise Error(format % ( index, \"File\", first_token.cursor.file, other_token.cursor.file ))\n\n\t\t\t\t# Compare line numbers and fail if different.\n\t\t\t\tif first_token.cursor.line != other_token.cursor.line:\n\t\t\t\t\traise Error(format % ( index, \"Line\", first_token.cursor.line, other_token.cursor.line ))\n\n\t\t\t\t# Compare character offsets and fail if different.\n\t\t\t\tif first_token.cursor.char != other_token.cursor.char:\n\t\t\t\t\traise Error(format % ( index, \"Char\", first_token.cursor.char, other_token.cursor.char ))\n\n\t\t\t\t# Compare token kinds and fail if different.\n\t\t\t\tif first_token.kind != other_token.kind:\n\t\t\t\t\traise Error(format % ( index, \"Kind\", first_token.kind, other_token.kind ))\n\n\t\t\t\t# Compare token texts and fail if different.\n\t\t\t\tif first_token.text != other_token.text:\n\t\t\t\t\traise Error(format % ( index, \"Text\", format_string(first_token.text), format_string(other_token.text) ))\n\n\t\texcept Error as that:\n\t\t\t# Print the list of found errors.\n\t\t\tprint()\n\t\t\tprint(\"%u errors:\" % len(_errors))\n\t\t\tfor error in _errors:\n\t\t\t\tprint(\"    %s\" % error)\n\t\t\tprint()\n\n\t\t\t# Print the list of scanned tokens.\n\t\t\tprint(\"%u tokens:\" % len(_tokens))\n\t\t\tfor token in _tokens:\n\t\t\t\tprint(\"    %s\" % token)\n\t\t\tprint()\n\n\t\t\traise\n\n\n# The lexer test suite.\nclass TestLexerClass(unittest.TestCase):\n\n\tdef test_empty_file_yields_nothing(self) -> 
None:\n\t\tsource : List[str] = []\n\t\terrors : List[LexerError] = []\n\t\ttokens : List[Token] = [\n\t\t\tToken(Cursor(\"\", 1, 1), TokenKind.EndOfFile, \"(eof)\")\n\t\t]\n\t\tLexerRunner(source, errors, tokens)\n\n\tdef test_empty_line_yields_eol(self) -> None:\n\t\tsource : List[str] = [ EOL ]\n\t\terrors : List[LexerError] = []\n\t\ttokens : List[Token] = [\n\t\t\tToken(Cursor(\"\", 1, 1), TokenKind.EndOfLine, EOL),\n\t\t\tToken(Cursor(\"\", 2, 1), TokenKind.EndOfFile, \"(eof)\")\n\t\t]\n\t\tLexerRunner(source, errors, tokens)\n\n\tdef test_comment_only(self) -> None:\n\t\tsource : List[str] = [ \"# This is a comment\" + EOL ]\n\t\terrors : List[LexerError] = []\n\t\ttokens : List[Token] = [\n\t\t\tToken(Cursor(\"\", 1, 1), TokenKind.Comment, \"# This is a comment\"),\n\t\t\tToken(Cursor(\"\", 1, 20), TokenKind.EndOfLine, EOL),\n\t\t\tToken(Cursor(\"\", 2, 1), TokenKind.EndOfFile, \"(eof)\")\n\t\t]\n\t\tLexerRunner(source, errors, tokens)\n\n", "sub_path": "b0c.py", "file_name": "b0c.py", "file_ext": "py", "file_size_in_byte": 185020, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 106, "usage_type": "call"}, {"api_name": "string.whitespace", "line_number": 129, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 227, "usage_type": "name"}, {"api_name": "io.TextIOWrapper", "line_number": 227, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 287, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 288, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 289, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 290, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 291, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 292, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 293, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 294, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 295, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 296, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 297, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 298, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 299, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 300, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 301, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 302, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 303, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 304, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 305, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 306, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 307, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 308, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 309, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 310, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 311, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 312, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 313, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 314, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 315, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 316, "usage_type": 
"call"}, {"api_name": "enum.auto", "line_number": 317, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 318, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 319, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 320, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 321, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 322, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 323, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 324, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 325, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 326, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 327, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 328, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 329, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 330, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 331, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 332, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 333, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 334, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 335, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 336, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 337, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 338, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 339, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 340, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 341, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 342, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 343, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 344, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 345, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 346, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 347, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 348, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 349, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 350, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 351, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 352, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 353, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 354, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 355, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 356, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 357, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 358, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 359, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 440, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 442, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 443, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 444, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 445, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 446, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 447, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 448, "usage_type": "call"}, {"api_name": 
"enum.auto", "line_number": 449, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 450, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 451, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 452, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 453, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 454, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 455, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 456, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 457, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 458, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 474, "usage_type": "attribute"}, {"api_name": "string.ascii_letters", "line_number": 476, "usage_type": "attribute"}, {"api_name": "string.ascii_letters", "line_number": 498, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 498, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 602, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 515, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 680, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 683, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 685, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 687, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 691, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 693, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 694, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 695, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 696, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 697, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 698, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 699, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 700, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 701, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 702, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 703, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 704, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 705, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 706, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 707, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 708, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 709, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 710, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 711, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 756, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 763, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 765, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 766, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 767, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 768, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 769, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 770, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 780, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 782, "usage_type": "name"}, {"api_name": 
"typing.Union", "line_number": 785, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 815, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 832, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 831, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 847, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 854, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 861, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 877, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 884, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 904, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 912, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 921, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 928, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 952, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 959, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 962, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 969, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 979, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 986, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 989, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 996, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 999, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 1004, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 1006, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1007, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1008, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1009, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 1010, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 1010, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1011, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1012, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 1015, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1052, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1082, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1094, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1095, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1119, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1122, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 1127, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 1129, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1130, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1131, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1132, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 1135, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1151, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1152, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1181, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1188, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1199, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1215, "usage_type": 
"name"}, {"api_name": "typing.List", "line_number": 1231, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1245, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1254, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1272, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 1276, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 1278, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1279, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1280, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 1286, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1301, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1308, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1317, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1335, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1342, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1350, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1381, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1388, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1397, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1404, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1427, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1434, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1447, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1455, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1467, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1479, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1505, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1515, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1536, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1556, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1566, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1586, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 1591, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 1593, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1594, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1595, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1596, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1597, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1598, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1599, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1600, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1601, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1602, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1603, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1604, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1605, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 1606, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 1629, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1639, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1670, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1684, "usage_type": 
"name"}, {"api_name": "typing.Any", "line_number": 1698, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1725, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1740, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1760, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 1760, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1765, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1768, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1808, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1809, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1822, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1884, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1887, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1890, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1893, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1896, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1899, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1902, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1905, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1908, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1911, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1914, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1917, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1920, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1923, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1926, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1929, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1932, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1935, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1938, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1941, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1944, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1947, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1950, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1953, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1956, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1959, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1962, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1965, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1968, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1971, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1974, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1977, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1980, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1983, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1986, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1989, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1992, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 1995, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2002, "usage_type": "name"}, {"api_name": "typing.Any", 
"line_number": 2005, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2014, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2018, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2023, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2028, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2031, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2037, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2043, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2048, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2051, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2057, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2063, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2070, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2076, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2081, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2087, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2092, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2098, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2103, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2108, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2114, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2117, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2120, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2123, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2127, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2132, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2135, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2139, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2143, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2147, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2152, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2157, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2163, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2167, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2171, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2175, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2178, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2189, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2200, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2210, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2344, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2347, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2356, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2359, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2362, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2373, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2384, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2387, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2390, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2393, "usage_type": "name"}, {"api_name": "typing.Any", 
"line_number": 2404, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2411, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2414, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2417, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2424, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2433, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2436, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2439, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2442, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2445, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2459, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2467, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2470, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2475, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2483, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2504, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2511, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2521, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2527, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2534, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 2537, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 2589, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 2589, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 2643, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 2849, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 2915, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 2961, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 2996, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3004, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3022, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3021, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3043, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3042, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3065, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3163, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3177, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3200, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3260, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3258, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3348, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3370, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3374, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3396, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3400, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3406, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3415, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3419, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3423, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3428, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3431, "usage_type": "name"}, 
{"api_name": "typing.Any", "line_number": 3436, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3439, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3456, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3462, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3465, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3468, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3473, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3476, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3480, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3495, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3500, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3511, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3516, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3519, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3532, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3541, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3546, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3549, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3555, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3565, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3571, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3576, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3580, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3585, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3589, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3593, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3598, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3602, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3612, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3616, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3619, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3622, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3631, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3635, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3639, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3644, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3647, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3655, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3659, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3663, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3670, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3673, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3678, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3683, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3687, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 3707, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 3708, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3710, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3724, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3728, "usage_type": 
"name"}, {"api_name": "typing.Any", "line_number": 3731, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 3733, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 3749, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 3751, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 3764, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3768, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3771, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3775, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3784, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3792, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3796, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3800, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3808, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3812, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 3819, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3819, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 3820, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3820, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 3831, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3835, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3839, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3843, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3847, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3862, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3878, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3891, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3895, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3900, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3905, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 3960, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3972, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 3991, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4003, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4007, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4011, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 4049, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4057, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 4248, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 4260, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 4263, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 4265, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 4267, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4324, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4340, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4358, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4362, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4366, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 4416, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 4366, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 4424, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4459, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4463, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4466, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4480, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4516, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4528, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4531, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4543, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4556, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4580, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4607, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 4611, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4611, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 4618, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4629, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4636, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4644, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4739, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4743, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4756, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4796, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4816, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4825, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4836, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 4840, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 4866, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 4869, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 4877, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 4856, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 4882, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 4882, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 4887, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 4888, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 4890, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 4890, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 4892, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 4898, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 4901, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 4902, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4902, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 4881, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 5017, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 5017, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 5018, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 5018, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 5034, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 5037, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 5055, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 5055, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 5056, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5056, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 5065, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 5066, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 5069, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5069, "usage_type": "attribute"}, {"api_name": "platform.machine", "line_number": 5085, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 5086, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 5164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5164, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5173, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 5174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5174, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 5178, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 5179, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 5206, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5233, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 5235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5235, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 5305, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 5307, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 5331, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 5333, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 5364, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 5365, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 5402, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5403, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5404, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 5474, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 5477, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5478, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5479, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5485, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5486, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5487, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5494, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5495, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 5496, "usage_type": "name"}]}